Example #1
    def connect(self):
        '''
        Connects to the Blackfynn API using the working profile and
        activates the top-level destination dataset.
        Prompts the user to confirm before continuing with the upload.
        Returns the destination dataset.
        '''
        try:
            b_fynn = Blackfynn(self.working_profile)
        except Exception as ex:
            sys.exit('Error connecting to Blackfynn. ' + str(ex))

        try:
            print("Trying to connect to dataset.", flush=True)
            dataset = b_fynn.get_dataset(self.dataset_name)
        except Exception as ex:
            sys.exit('Unable to connect to the dataset. ' + str(ex))

        print()
        print('Dataset:  {}\nProfile:  {}'.format(dataset,
                                                  self.working_profile))

        prompt = input('Continue with upload (y/n)? ')
        if prompt != 'y':
            sys.exit('Aborting upload.')
        return dataset
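A minimal sketch of the class that this connect() method presumably belongs to; only working_profile and dataset_name are implied by the snippet above, while the class name and constructor are hypothetical:

# Hypothetical host class for connect(); attribute names come from the method above.
class BlackfynnUploader:
    def __init__(self, working_profile, dataset_name):
        self.working_profile = working_profile  # Blackfynn profile name
        self.dataset_name = dataset_name        # name or id of the destination dataset

    # connect(), as shown above, would be defined as a method here, e.g.:
    #   uploader = BlackfynnUploader('default', 'My Dataset')
    #   dataset = uploader.connect()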
Example #2
def lambda_handler(event, context):
    DATASET = os.environ['DASHBOARD_DATASET_NAME']

    bf = Blackfynn()
    ds = bf.get_dataset(DATASET)

    update(bf, ds)

    return {
        'statusCode': 200,
        'body': json.dumps('Successfully updated the SPARC dashboard')
    }
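A quick local smoke test of the handler might look like the sketch below; the dataset name and the empty event are placeholders, and the module's own imports (os, json, Blackfynn, update) plus Blackfynn credentials from the environment or a profile are assumed:

# Hypothetical local invocation of lambda_handler; values are placeholders.
import os

os.environ['DASHBOARD_DATASET_NAME'] = 'SPARC Datasets'  # placeholder dataset name
response = lambda_handler(event={}, context=None)
print(response['statusCode'], response['body'])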
Example #3
def blackfynn_get(dataset='Timeseries Dataset', collection=0, channels='sine 50 Hz'):

    # establish connection with the API
    bf = Blackfynn(api_token='**********************************', api_secret='**********************************')
    ds = bf.get_dataset(dataset)
    print(ds)

    # get all timeseries collections
    ts = []
    for ds_temp in ds:
        ts.append(ds_temp)

    
    # NOTE: THIS NEEDS TO BE MADE MODULAR
    data = ts[collection].get_data(length='1s')

    # take the data from the channel and process it to be read by javascript

    # process y values
    temp = np.asarray(data[channels])
    java_y = []
    for el in temp:
        java_y.append(float(el))

    # generate x values
    # NOTE: CURRENTLY NOT MODULAR
    temp2 = np.linspace(0, 1, len(temp))
    java_x = []
    for el in temp2:
        java_x.append(el)

    # write to file
    with open('data.js', 'w') as f:
        f.write('arbData = ')
        f.write(str(java_y))
        f.write('\ndate = ')
        f.write(repr(java_x))

    # update GitHub
    g = git_hub()
    g.update()

    print('update ran successfully')
Example #4
def blackfynn_get():
    # make a blackfynn API call according to threeWrapper parameters

    fpipe = FilePipe()
    params = fpipe.receive()

    api_key = params['api_key']
    api_secret = params['api_secret']
    dataset = params['dataset']
    collection = params['collection']
    channels = params['channels']
    window_from_start = params['window_from_start']
    start = params['start']
    end = params['end']
    error = 0

    # Process function input:
    if start == -1 or end == -1:
        has_time_window = False
    else:
        has_time_window = True

    #establish connection with the API
    try:
        bf = Blackfynn(api_token=api_key, api_secret=api_secret)
    except:
        fpipe.send({
            'error':
            'Could not connect to the Blackfynn API. Check your API key and internet connection'
        })
        return

    # get all timeseries collections
    try:
        ds = bf.get_dataset(dataset)
        tstemp = ds.get_items_by_name(collection)
        time_series = tstemp[0]
    except:
        fpipe.send({
            'error':
            'Could not find the requested Dataset and Collection. Please check your names if you have not already'
        })
        return

    # Get data for all channels according to the length set by parameters

    if not has_time_window:
        data = time_series.get_data(length=(str(window_from_start) + 's'))
    else:
        data = time_series.get_data(start=start, end=end)

    # take the data from the channel and process it to be passed into binary (in filePipe)

    # process y values
    list_y = data[channels].values.tolist()

    # generate x values
    if has_time_window:
        time = np.linspace(start, end, len(list_y))
    else:
        time = np.linspace(0, window_from_start, len(list_y))
    list_x = time.tolist()

    cache_dict = create_file_cache(data)
    output = {'x': list_x, 'y': list_y, 'cache': cache_dict, 'error': False}

    fpipe.send(output)
    print('update ran successfully')
Example #5
class DatcoreClient(object):
    def __init__(self,
                 api_token=None,
                 api_secret=None,
                 host=None,
                 streaming_host=None):
        # WARNING: construction raises an exception if the service is not available.
        # Use datacore_wrapper for safe calls
        # TODO: can use https://developer.blackfynn.io/python/latest/configuration.html#environment-variables
        self._bf = Blackfynn(
            profile=None,
            api_token=api_token,
            api_secret=api_secret,
            host=host,
            streaming_host=streaming_host,
        )

    def profile(self):
        """
        Returns profile of current User
        """
        return self._bf.profile

    def _collection_from_destination(self, destination: str):
        destination_path = Path(destination)
        parts = destination_path.parts

        dataset_name = parts[0]
        dataset = self.get_dataset(dataset_name)
        if dataset is None:
            return None, None

        collection_id = dataset.id
        collection = dataset
        collections = []
        if len(parts) > 1:
            object_path = Path(*parts[1:])
            collections = list(object_path.parts)
            collection_id = ""
            collection_id = _get_collection_id(dataset, collections,
                                               collection_id)
            collection = self._bf.get(collection_id)

        return collection, collection_id

    def _destination_from_id(self, destination_id: str):
        # NOTE: .get(*) logs
        #  INFO:blackfynn.client.Blackfynn:Unable to retrieve object
        # if destination_id refers to a Dataset

        destination: Union[DataPackage,
                           Collection] = self._bf.get(destination_id)
        if destination is None:
            destination: Dataset = self._bf.get_dataset(destination_id)

        return destination

    def list_files_recursively(self, dataset_filter: str = ""):
        files = []

        for dataset in self._bf.datasets():
            if not dataset_filter or dataset_filter in dataset.name:
                self.list_dataset_files_recursively(files, dataset,
                                                    Path(dataset.name))

        return files

    def list_files_raw_dataset(self, dataset_id: str) -> List[FileMetaDataEx]:
        files = []  # raw packages
        _files = []  # fmds
        data = {}  # map to keep track of parents-child

        cursor = ""
        page_size = 1000
        api = self._bf._api.datasets

        dataset = self._bf.get_dataset(dataset_id)
        if dataset is not None:
            while True:
                resp = api._get(
                    api._uri(
                        "/{id}/packages?cursor={cursor}&pageSize={pageSize}&includeSourceFiles={includeSourceFiles}",
                        id=dataset_id,
                        cursor=cursor,
                        pageSize=page_size,
                        includeSourceFiles=False,
                    ))
                for package in resp.get("packages", list()):
                    id = package["content"]["id"]
                    data[id] = package
                    files.append(package)
                cursor = resp.get("cursor")
                if cursor is None:
                    break

            for f in files:
                if f["content"]["packageType"] != "Collection":
                    filename = f["content"]["name"]
                    file_path = ""
                    file_id = f["content"]["nodeId"]
                    _f = f
                    while "parentId" in _f["content"].keys():
                        parentid = _f["content"]["parentId"]
                        _f = data[parentid]
                        file_path = _f["content"]["name"] + "/" + file_path

                    bucket_name = dataset.name
                    file_name = filename
                    file_size = 0
                    object_name = str(Path(file_path) / file_name)

                    file_uuid = str(Path(bucket_name) / object_name)
                    created_at = f["content"]["createdAt"]
                    last_modified = f["content"]["updatedAt"]
                    parent_id = dataset_id
                    if "parentId" in f["content"]:
                        parentId = f["content"]["parentId"]
                        parent_id = data[parentId]["content"]["nodeId"]

                    fmd = FileMetaData(
                        bucket_name=bucket_name,
                        file_name=file_name,
                        object_name=object_name,
                        location=DATCORE_STR,
                        location_id=DATCORE_ID,
                        file_uuid=file_uuid,
                        file_id=file_id,
                        raw_file_path=file_uuid,
                        display_file_path=file_uuid,
                        created_at=created_at,
                        last_modified=last_modified,
                        file_size=file_size,
                    )
                    fmdx = FileMetaDataEx(fmd=fmd, parent_id=parent_id)
                    _files.append(fmdx)

        return _files

    def list_files_raw(self, dataset_filter: str = "") -> List[FileMetaDataEx]:
        _files = []

        for dataset in self._bf.datasets():
            _files = _files + self.list_files_raw_dataset(dataset.id)

        return _files

    def list_dataset_files_recursively(self, files: List[FileMetaData],
                                       base: BaseCollection,
                                       current_root: Path):
        for item in base:
            if isinstance(item, Collection):
                _current_root = current_root / Path(item.name)
                self.list_dataset_files_recursively(files, item, _current_root)
            else:
                parts = current_root.parts
                bucket_name = parts[0]
                file_name = item.name
                file_size = 0
                # lets assume we have only one file
                if item.files:
                    file_name = Path(
                        item.files[0].as_dict()["content"]["s3key"]).name
                    file_size = item.files[0].as_dict()["content"]["size"]
                # if this is in the root directory, the object_name is the filename only
                if len(parts) > 1:
                    object_name = str(Path(*list(parts)[1:]) / Path(file_name))
                else:
                    object_name = str(Path(file_name))

                file_uuid = str(Path(bucket_name) / Path(object_name))
                file_id = item.id
                created_at = item.created_at
                last_modified = item.updated_at
                fmd = FileMetaData(
                    bucket_name=bucket_name,
                    file_name=file_name,
                    object_name=object_name,
                    location=DATCORE_STR,
                    location_id=DATCORE_ID,
                    file_uuid=file_uuid,
                    file_id=file_id,
                    raw_file_path=file_uuid,
                    display_file_path=file_uuid,
                    created_at=created_at,
                    last_modified=last_modified,
                    file_size=file_size,
                )
                files.append(fmd)

    def create_dataset(self, ds_name, *, force_delete=False):
        """
        Creates a new dataset for the current user and returns it. Returns existing one
        if there is already a dataset with the given name.

        Args:
            ds_name (str): Name for the dataset (_,-,' ' and capitalization are ignored)
            force_delete (bool, optional): Delete first if dataset already exists
        """
        ds = None
        with suppress(Exception):
            ds = self._bf.get_dataset(ds_name)
            if force_delete:
                ds.delete()
                ds = None

        if ds is None:
            ds = self._bf.create_dataset(ds_name)

        return ds

    def get_dataset(self, ds_name, create_if_not_exists=False):
        """
        Returns dataset with the given name. Creates it if required.

        Args:
            ds_name (str): Name for the dataset
            create_if_not_exists (bool, optional): Create the dataset if it does not exist
        """

        ds = None
        with suppress(Exception):
            ds = self._bf.get_dataset(ds_name)

        if ds is None and create_if_not_exists:
            ds = self._bf.create_dataset(ds_name)

        return ds

    def delete_dataset(self, ds_name):
        """
        Deletes dataset with the given name.

        Args:
            ds_name (str): Name for the dataset
        """

        # this is not supported
        ds = self.get_dataset(ds_name)
        if ds is not None:
            self._bf.delete(ds.id)

    def exists_dataset(self, ds_name):
        """
        Returns True if dataset with the given name exists.

        Args:
            ds_name (str): Name for the dataset
        """

        ds = self.get_dataset(ds_name)
        return ds is not None

    def upload_file(self,
                    destination: str,
                    filepath: str,
                    meta_data=None) -> bool:
        """
        Uploads a file to a given dataset/collection given its filepath on the host. Optionally
        adds some meta data

        Args:
            destination (str): The dataset/collection path into which the file shall be uploaded
            filepath (path): Full path to the file
            meta_data (dict, optional): Dictionary of meta data

        Note:
            Blackfynn postprocesses data based on file endings. If it can do that,
            the filenames on the server change.
        """
        # parse the destination and try to find the package_id to upload to
        collection, collection_id = self._collection_from_destination(
            destination)

        if collection is None:
            return False

        files = [
            filepath,
        ]
        self._bf._api.io.upload_files(collection,
                                      files,
                                      display_progress=True,
                                      use_agent=False)
        collection.update()

        if meta_data is not None:
            for f in files:
                filename = os.path.basename(f)
                package = self.get_package(collection, filename)
                if package is not None:
                    self._update_meta_data(package, meta_data)

        return True

    def _update_meta_data(self, package, meta_data):
        """
        Updates or replaces metadata for a package

        Args:
            package (package): The package for which the meta data needs update
            meta_data (dict): Dictionary of meta data
        """

        for key in meta_data.keys():
            package.set_property(key, meta_data[key], category="simcore")

        package.update()

    def download_file(self, source, filename, destination_path):
        """
        Downloads a file from a source dataset/collection given its filename. Stores
        it under destination_path

        Args:
            source (dataset/collection): The dataset or collection to download from
            filename (str): Name of the file
            destination_path (str): Path on host for storing file
        """

        url = self.download_link(source, filename)
        if url:
            _file = urllib.URLopener()  # nosec
            _file.retrieve(url, destination_path)
            return True
        return False

    def download_link(self, destination, filename):
        """
        Returns a presigned url for download; destination is a dataset/collection path
        """
        collection, collection_id = self._collection_from_destination(
            destination)

        for item in collection:
            if isinstance(item, DataPackage):
                if Path(item.files[0].as_dict()["content"]
                        ["s3key"]).name == filename:
                    file_desc = self._bf._api.packages.get_sources(item.id)[0]
                    url = self._bf._api.packages.get_presigned_url_for_file(
                        item.id, file_desc.id)
                    return url

        return ""

    def download_link_by_id(self, file_id):
        """
        Returns a presigned url for download of a file given its file_id
        """
        url = ""
        filename = ""
        package = self._bf.get(file_id)
        if package is not None:
            filename = Path(
                package.files[0].as_dict()["content"]["s3key"]).name

        file_desc = self._bf._api.packages.get_sources(file_id)[0]
        url = self._bf._api.packages.get_presigned_url_for_file(
            file_id, file_desc.id)

        return url, filename

    def get_package(self, source, filename):
        """
        Returns package from source by name if exists

        Args:
            source (dataset/collection): The dataset or collection to download from
            filename (str): Name of the file
        """

        source.update()
        for item in source:
            if item.name == filename:
                return item

        return None

    def delete_file(self, destination, filename):
        """
        Deletes file by name from destination by name

        Args:
            destination (dataset/collection): The dataset or collection to delete from
            filename (str): Name of the file
        """
        collection, collection_id = self._collection_from_destination(
            destination)

        if collection is None:
            return False

        collection.update()
        for item in collection:
            if isinstance(item, DataPackage):
                if Path(item.files[0].as_dict()["content"]
                        ["s3key"]).name == filename:
                    self._bf.delete(item)
                    return True

        return False

    def delete_file_by_id(self, id: str) -> bool:
        """
        Deletes file by id

        Args:
            id (str): datcore id for the file
        """
        package: DataPackage = self._bf.get(id)
        package.delete()
        return not package.exists

    def delete_files(self, destination):
        """
        Deletes all files in destination

        Args:
            destination (dataset/collection): The dataset or collection to delete
        """

        collection, collection_id = self._collection_from_destination(
            destination)

        if collection is None:
            return False

        collection.update()
        for item in collection:
            self._bf.delete(item)

    def update_meta_data(self, dataset, filename, meta_data):
        """
        Updates metadata for a file

        Args:
            dataset (package): Which dataset
            filename (str): Which file
            meta_data (dict): Dictionary of meta data
        """

        filename = os.path.basename(filename)
        package = self.get_package(dataset, filename)
        if package is not None:
            self._update_meta_data(package, meta_data)

    def get_meta_data(self, dataset, filename):
        """
        Returns metadata for a file

        Args:
            dataset (package): Which dataset
            filename (str): Which file
        """

        meta_data = {}
        filename = os.path.basename(filename)
        package = self.get_package(dataset, filename)
        if package is not None:
            meta_list = package.properties
            for m in meta_list:
                meta_data[m.key] = m.value

        return meta_data

    def delete_meta_data(self, dataset, filename, keys=None):
        """
        Deletes specified keys in meta data for source/filename.

        Args:
            dataset (package): Which dataset
            filename (str): Which file
            keys (list of str, optional): Deletes specified keys, deletes
            all meta data if None
        """

        filename = os.path.basename(filename)
        package = self.get_package(dataset, filename)
        if package is not None:
            if keys is None:
                for p in package.properties:
                    package.remove_property(p.key, category="simcore")
            else:
                for k in keys:
                    package.remove_property(k, category="simcore")

    def search(self, what, max_count):
        """
        Searches for a term in the database. Returns max_count results

        Args:
            what (str): query
            max_count (int): Max number of results to return
        """
        return self._bf.search(what, max_count)

    def upload_file_to_id(self, destination_id: str, filepath: str):
        """
        Uploads a file to a given dataset/collection (identified by id), given its
        filepath on the host.

        Returns the id for the newly created resource

        Note: filepath could be an array

        Args:
            destination_id : The dataset/collection id into which the file shall be uploaded
            filepath (path): Full path to the file
        """
        _id = ""
        destination = self._destination_from_id(destination_id)
        if destination is None:
            return _id

        files = [
            filepath,
        ]

        try:
            # TODO: PC->MAG: should protected API
            # TODO: add new agent SEE https://developer.blackfynn.io/python/latest/CHANGELOG.html#id31
            result = self._bf._api.io.upload_files(destination,
                                                   files,
                                                   display_progress=True,
                                                   use_agent=False)
            if result and result[0] and "package" in result[0][0]:
                _id = result[0][0]["package"]["content"]["id"]

        except Exception:
            logger.exception("Error uploading file to datcore")

        return _id

    def create_collection(self, destination_id: str, collection_name: str):
        """
        Creates an empty collection within destination

        Args:
            destination_id : The dataset/collection id in which the collection is created
            collection_name (str): Name of the new collection
        """
        destination = self._destination_from_id(destination_id)
        _id = ""

        if destination is None:
            return _id

        new_collection = Collection(collection_name)
        destination.add(new_collection)
        new_collection.update()
        destination.update()
        _id = new_collection.id

        return _id

    def list_datasets(self) -> DatasetMetaDataVec:
        data = []
        for dataset in self._bf.datasets():
            dmd = DatasetMetaData(dataset_id=dataset.id,
                                  display_name=dataset.name)
            data.append(dmd)

        return data
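A minimal usage sketch for DatcoreClient, assuming valid API credentials; the dataset name, destination path, and file below are placeholders:

# Hypothetical usage of DatcoreClient; tokens and names are placeholders.
client = DatcoreClient(api_token='<token>', api_secret='<secret>')
print(client.profile())

ds = client.get_dataset('simcore-testing', create_if_not_exists=True)
for dmd in client.list_datasets():
    print(dmd.dataset_id, dmd.display_name)

# destination strings are parsed as "<dataset>/<collection>/..." by _collection_from_destination()
client.upload_file('simcore-testing/outputs', 'results.csv',
                   meta_data={'node_id': '1234'})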
Example #6
        sys.exit()
    elif opt in '-l':
        for ds in dsets:
            printf("%s\n", ds[0])
        sys.exit()
    elif opt in "-f":
        filename = arg
        FILE = True
        if not file_exists(filename):
            printf("file, %s, does not exist.\n", filename)
            sys.exit()
    elif opt in '-d':
        DATASET = True
        dsetname = arg
        try:
            dset = bf.get_dataset(dsdict[arg])
        except:
            printf("Dataset, %s, does NOT exist.\n", arg)
            sys.exit()

# Using single Dataset
if not ALL and SOURCE and DESTINATION and DATASET:
    destination = locate_path(dset, dest)
    source = locate_path(dset, src)
    bf.move(destination, source)
    printf("%s: %s moved to %s\n", dsetname, src, dest)

# Using a file containing Dataset names
elif not ALL and not DATASET and FILE:
    with open(filename) as f:
        fdsets = f.read().splitlines()
Example #7
except getopt.GetoptError:
    printf("%s\n", syntax())
    sys.exit()

dsets, dsdict = get_datasets()

for opt, arg in opts:
    if opt == '-h':
        printf("%s\n", syntax())
        sys.exit()

    elif opt in '-l':
        for ds in dsets:
            printf("%s\n", ds[0])
        sys.exit()

    elif opt in '-d':
        dset = bf.get_dataset(dsdict[arg])

    elif opt in '-n':
        if db_exists(arg, dsets):
            printf("Dataset %s already exists.  Can not continue.\n", arg)
            EXISTS = True
            sys.exit()
        else:
            printf("Creating new dataset: %s\n", arg)
            bf.create_dataset(arg)
            newdset = bf.get_dataset(arg)

create_duplicate(dset, newdset)
Example #8
class BlackfynnDataModel(object):

    def __init__(self):
        self._settings = {'active-profile': ''}
        self._cache = {}
        self._bf = None

    def addProfile(self, profile):
        self._settings[profile['name']] = {'api_token': profile['token'], 'api_secret': profile['secret']}

    def setActiveProfile(self, profile_name):
        self._settings['active-profile'] = profile_name

    def getActiveProfile(self):
        return self._settings['active-profile']

    def getExistingProfileNames(self):
        profile_names = [*self._settings]
        profile_names.remove('active-profile')
        return profile_names

    def _getBlackfynn(self, profile_name):
        api_key = self._settings[profile_name]['api_token']
        api_secret = self._settings[profile_name]['api_secret']
        print('[{0}]:[{1}]'.format(api_key, api_secret))
        self._bf = Blackfynn(api_token=api_key, api_secret=api_secret)
        return self._bf

    def getDatasets(self, profile_name, refresh=False):
        if profile_name in self._cache and not refresh:
            datasets = self._cache[profile_name]['datasets']
        elif refresh:
            bf = self._getBlackfynn(profile_name)
            datasets = bf.datasets()
            if profile_name in self._cache:
                self._cache[profile_name]['datasets'] = datasets
            else:
                self._cache[profile_name] = {'datasets': datasets}
        else:
            datasets = []

        return datasets

    def getDataset(self, profile_name, dataset_name, refresh=False):
        if profile_name in self._cache and dataset_name in self._cache[profile_name] and not refresh:
            dataset = self._cache[profile_name][dataset_name]
        elif refresh:
            bf = self._getBlackfynn(profile_name)
            dataset = bf.get_dataset(dataset_name)
            self._cache[profile_name][dataset_name] = dataset
        else:
            dataset = []

        return dataset

    def getTimeseriesData(self, profile_name, dataset_name, timeseries_name):
        for stored_dataset in self._cache[profile_name][dataset_name]:
            if stored_dataset.name == timeseries_name:
                timeseries_dframe = stored_dataset.get_data(length='16s')

        cache_output = self._create_file_cache(timeseries_dframe)
        absolute_timeseries_values = timeseries_dframe.axes[0]
        relative_times = []
        for time in absolute_timeseries_values:
            relative_times.append( round( time.timestamp() - absolute_timeseries_values[0].timestamp(), 6) )
        return [cache_output, relative_times]


    def _create_file_cache(self, data_frame):

        cache_dictionary = {}
        keys = natsorted(data_frame.keys()) # Sort the keys in 'natural' order
        for key in keys:
            cache_dictionary[key] = data_frame[key].values.tolist()

        return cache_dictionary


    def uploadRender(self, filePath):
        # uploadRender: Takes a given file path and uploads it to blackfynn in a folder called 'Zinc Exports' for the
        #               user currently logged in.
        try:
            ds = self._bf.get_dataset('Zinc Exports')
        except:
            self._bf.create_dataset('Zinc Exports')
            ds = self._bf.get_dataset('Zinc Exports')
        ds.upload(filePath)

    def getSettings(self):
        return self._settings

    def setSettings(self, settings):
        print('set settings {0}'.format(settings))
        self._settings.update(settings)
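A short sketch of driving BlackfynnDataModel; the profile token and secret are placeholders:

# Hypothetical usage of BlackfynnDataModel; token and secret are placeholders.
model = BlackfynnDataModel()
model.addProfile({'name': 'default', 'token': '<api-token>', 'secret': '<api-secret>'})
model.setActiveProfile('default')

datasets = model.getDatasets('default', refresh=True)  # first call must refresh to hit the API
for ds in datasets:
    print(ds.name)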
Example #9
except getopt.GetoptError:
    printf("%s\n", syntax())
    sys.exit()

dsets, dsdict = get_datasets()

for opt, arg in opts:
    if opt == '-h':
        printf("%s\n", syntax())
        sys.exit()
    elif opt == '-i':
        CASE=False
        printf("Note: case-insensitive compares\n")
    elif opt in ('-d'):
        try:
            dset = bf.get_dataset(dsdict[arg])
        except:
            printf("Dataset, %s, does NOT exist.\n", arg)
            sys.exit()
    elif opt == '--all':
        ALL = True
        dsets = []
        for ds in bf.datasets():
            if 'HPAP-' in ds.name: dsets.append(ds)
        dsets.sort(key = lambda x: x.name)
    elif opt in ('-l'):
        for ds in dsets: printf("%s\n", ds[0])
        sys.exit()
    elif opt in ('--data'):
        FILE = True
    elif opt in ('-p'):
Example #10
def convert_ncs2bfts(dataFolder,
                     resultFolder=None,
                     bfFileName=None,
                     dsName=None,
                     fs=None):
    """
        param dataFolder: folder containing ncs files
        param resultFolder: folder to contain results
        param bfFileName: name to save generated bfts file
        param dsName: name (or id) of dataset
        param fs: sampling rate
        return: _____
    """
    dataFolder = os.path.abspath(dataFolder)
    if not bfFileName:
        bfFileName = os.path.basename(dataFolder) + '.bfts'
    if resultFolder:
        resultFolder = os.path.abspath(resultFolder)
        resultFile = os.path.join(resultFolder, bfFileName)
    else:
        resultFile = bfFileName
#
    if os.path.isfile(resultFile):
        print(bfFileName, 'exists')
    else:
        print('Converting to', bfFileName, '...')
        chls = OrderedDict()  # dictionary to store channel values
        for chFile in os.listdir(dataFolder):
            if chFile.endswith('ncs'):
                ncs = ncs2py.load_ncs(
                    os.path.join(dataFolder, chFile)
                )  # import neuralynx data. NOTE: import stores information as a dictionary
                rawData = ncs['data']
                rawData = resample_poly(rawData, 1.0,
                                        ncs['sampling_rate'] / fs)
                chls.update({'ch' + chFile.split('.')[0][3:]: rawData})
#
        TimeCreated = [
            line.strip() for line in ncs['raw_header'].split('\r\n')
            if line != '' if 'TimeCreated' == line.strip()[1:12]
        ][0]
        TimeCreated = ncs2py.parse_neuralynx_time_string(TimeCreated)
        TimeCreated = (TimeCreated -
                       datetime(1970, 1, 1)).total_seconds() * (1e6)
        timeVec = ncs['timestamp'] + TimeCreated
        timeVec = arange(timeVec[0],
                         timeVec[-1], (1.0 / fs) * (1e6),
                         dtype=int64)
        sampleSize = timeVec.shape[0]
        #
        df = DataFrame(chls)[0:sampleSize]
        df.insert(0, 'timeStamp', timeVec)
        df.to_csv(resultFile, index=False)
#
    if dsName:
        bf = Blackfynn()
        ds = bf.get_dataset(dsName)
        if os.path.basename(resultFile)[:-5] in ds.get_items_names():
            print(bfFileName, 'uploaded')
        else:
            print('uploading', bfFileName, 'to Blackfynn...')
            ds.upload(resultFile)
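An illustrative call to convert_ncs2bfts; the folder names, dataset name, and target sampling rate below are placeholders:

# All arguments are illustrative placeholders.
convert_ncs2bfts(dataFolder='recordings/session01',
                 resultFolder='converted',
                 dsName='Neuralynx Recordings',
                 fs=2000)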
Example #11
def blackfynn_cli():
    args = docopt(__doc__,
                  version='bf version {}'.format(blackfynn.__version__),
                  options_first=True)

    # Test for these two commands first as they
    # do not require a Blackfynn client
    if args['<command>'] in ['help', None]:
        print((__doc__.strip('\n')))
        return

    if args['<command>'] == 'profile':
        from blackfynn.cli import bf_profile
        bf_profile.main()
        return

    # Display warning message if config.ini is not found
    # create a dummy settings object to load environment variables and defaults only
    settings = Settings()
    if not os.path.exists(settings.config_file):
        print(
            "\033[31m* Warning: No config file found, run 'bf profile' to start the setup assistant\033[0m"
        )

    # Try to use profile specified by --profile, exit if invalid
    try:
        bf = Blackfynn(args['--profile'])
    except Exception as e:
        exit(e)

    # Try to use dataset specified by --dataset, exit if invalid
    try:
        if args['--dataset'] is not None:
            dataset = bf.get_dataset(args['--dataset'])
            set_working_dataset(dataset.id)
    except Exception as e:
        exit(e)

    if args['<command>'] == 'status':
        from blackfynn.cli import bf_status
        bf_status.main(bf)
    elif args['<command>'] == 'use':
        from . import bf_use
        bf_use.main(bf)
    elif args['<command>'] == 'init':
        from . import bf_init
        bf_init.main(bf)
    elif args['<command>'] in ['datasets', 'ds']:
        from . import bf_datasets
        bf_datasets.main(bf)
    elif args['<command>'] in ['organizations', 'orgs']:
        from . import bf_organizations
        bf_organizations.main(bf)
    elif args['<command>'] in ['share', 'unshare', 'collaborators']:
        from . import bf_share
        bf_share.main(bf)
    elif args['<command>'] == 'cache':
        from . import bf_cache
        bf_cache.main(bf)
    elif args['<command>'] == 'create':
        from . import bf_create
        bf_create.main(bf)
    elif args['<command>'] == 'delete':
        from . import bf_delete
        bf_delete.main(bf)
    elif args['<command>'] == 'move':
        from . import bf_move
        bf_move.main(bf)
    elif args['<command>'] == 'rename':
        from . import bf_rename
        bf_rename.main(bf)
    elif args['<command>'] == 'props':
        from . import bf_props
        bf_props.main(bf)
    elif args['<command>'] == 'get':
        from . import bf_get
        bf_get.main(bf)
    elif args['<command>'] == 'where':
        from . import bf_where
        bf_where.main(bf)
    elif args['<command>'] == 'upload':
        from . import bf_upload
        bf_upload.main(bf)
    elif args['<command>'] == 'append':
        from . import bf_append
        bf_append.main(bf)
    elif args['<command>'] == 'search':
        from . import bf_search
        bf_search.main(bf)
    else:
        exit("Invalid command: '{}'\nSee 'bf help' for available commands".
             format(args['<command>']))
Example #12
import glob
import os
import time
import sys
if sys.platform == 'win32':
    rootPath = 'R:\\'
else:
    rootPath = os.path.join('//', 'media', 'rnelshare')

sys.path.append(
    os.path.join(rootPath, 'users', 'amn69', 'Projects', 'cat', 'selectivity',
                 'surface paper', 'v2018'))
import helperFcns as hf

bf = Blackfynn('lumbar_selectivity')
catDS = bf.get_dataset('N:dataset:1cc6b671-0dea-4aab-ad30-ed3884e17028')

db = hf.db
collection = db.blackfynnUpload

for iSub in ['HA04']:  # ['Electro','Freeze','HA02','HA04','Galactus','Hobgoblin']
    print(iSub)
    if collection.find({'subject': iSub}).count() == 0:
        subjFolder = catDS.create_collection(iSub)
    else:
        subjFolder = catDS.get_items_by_name(iSub)[0]

    targetSessions = sorted(hf.PWbySession[iSub].keys())
    for iSesh in targetSessions:
        print(iSesh)
Example #13
class DatcoreClient(object):
    def __init__(self, api_token=None, api_secret=None, host=None, streaming_host=None):
        self.client = Blackfynn(profile=None, api_token=api_token, api_secret=api_secret,
                                host=host, streaming_host=streaming_host)
    def _context(self):
        """
        Returns current organizational context
        """
        return self.client.context

    def profile(self):
        """
        Returns profile of current User
        """
        return self.client.profile

    def organization(self):
        """
        Returns organization name
        """
        return self.client.context.name

    def list_datasets(self):
        ds = []
        for item in self.client.datasets():
            ds.append(item.name)

        return ds


    def list_files(self):
        files = []
        for ds in self.client.datasets():
            for item in ds:
                files.append(os.path.join(ds.name, item.name))

        return files

    def create_dataset(self, ds_name, force_delete=False):
        """
        Creates a new dataset for the current user and returns it. Returns existing one
        if there is already a dataset with the given name.

        Args:
            ds_name (str): Name for the dataset (_,-,' ' and capitalization are ignored)
            force_delete (bool, optional): Delete first if dataset already exists
        """

        ds = None
        try:
            ds = self.client.get_dataset(ds_name)
            if force_delete:
                ds.delete()
                ds = None
        except Exception: # pylint: disable=W0703
            pass

        if ds is None:
            ds = self.client.create_dataset(ds_name)

        return ds

    def get_dataset(self, ds_name, create_if_not_exists=False):
        """
        Returns dataset with the given name. Creates it if required.

        Args:
            ds_name (str): Name for the dataset
            create_if_not_exists (bool, optional): Create the dataset if it does not exist
        """

        ds = None
        try:
            ds = self.client.get_dataset(ds_name)
        except Exception: # pylint: disable=W0703
            pass

        if ds is None and create_if_not_exists:
            ds = self.client.create_dataset(ds_name)

        return ds

    def delete_dataset(self, ds_name):
        """
        Deletes dataset with the given name.

        Args:
            ds_name (str): Name for the dataset
        """

        # this is not supported
        ds = self.get_dataset(ds_name)
        if ds is not None:
            self.client.delete(ds.id)

    def exists_dataset(self, ds_name):
        """
        Returns True if dataset with the given name exists.

        Args:
            ds_name (str): Name for the dataset
        """

        ds = self.get_dataset(ds_name)
        return ds is not None

    def upload_file(self, dataset, filepaths, meta_data = None):
        """
        Uploads a file to a given dataset given its filepath on the host. Optionally
        adds some meta data

        Args:
            dataset (dataset): The dataset into which the file shall be uploaded
            filepaths (path or list of paths): Full path(s) to the file(s)
            meta_data (dict, optional): Dictionary of meta data

        Note:
            Blackfynn postprocesses data based on file endings. If it can do that,
            the filenames on the server change. This makes it difficult to retrieve
            them back by name (see get_sources below). Also, for now we assume we have
            only single file data.
        """


        if isinstance(filepaths, list):
            files = filepaths
        else:
            files = [filepaths]
        # pylint: disable = E1101
        self.client._api.io.upload_files(dataset, files, display_progress=True)
        dataset.update()

        if meta_data is not None:
            for f in files:
                filename = os.path.basename(f)
                package = self.get_package(dataset, filename)
                if package is not None:
                    self._update_meta_data(package, meta_data)

    def _update_meta_data(self, package, meta_data):
        """
        Updates or replaces metadata for a package

        Args:
            package (package): The package for which the meta data needs update
            meta_data (dict): Dictionary of meta data
        """

        for key in meta_data.keys():
            package.set_property(key, meta_data[key], category='simcore')

        package.update()

    def download_file(self, source, filename, destination_path):
        """
        Downloads a file from a source dataset/collection given its filename. Stores
        it under destination_path

        Args:
            source (dataset/collection): The dataset or collection to download from
            filename (str): Name of the file
            destination_path (str): Path on host for storing file
        """

        # pylint: disable = E1101
        url = self.download_link(source, filename)
        if url:
            _file = urllib.URLopener()
            _file.retrieve(url, destination_path)
            return True
        return False

    def download_link(self, source, filename):
        """
        Returns a presigned url for download; source is a dataset
        """

        # pylint: disable = E1101

        for item in source:
            if item.name == filename:
                file_desc = self.client._api.packages.get_sources(item.id)[0]
                url = self.client._api.packages.get_presigned_url_for_file(item.id, file_desc.id)
                return url

        return ""

    def exists_file(self, source, filename):
        """
        Checks if file exists in source

        Args:
            source (dataset/collection): The dataset or collection to check
            filename (str): Name of the file
        """

        source.update()
        for item in source:
            if item.name == filename:
                return True

        return False

    def get_package(self, source, filename):
        """
        Returns package from source by name if exists

        Args:
            source (dataset/collection): The dataset or collection to download from
            filename (str): Name of the file
        """

        source.update()
        for item in source:
            if item.name == filename:
                return item

        return None

    def delete_file(self, source, filename):
        """
        Deletes file by name from source by name

        Args:
            source (dataset/collection): The dataset or collection to delete from
            filename (str): Name of the file
        """
        source.update()
        for item in source:
            if item.name == filename:
                self.client.delete(item)

    def delete_files(self, source):
        """
        Deletes all files in source

        Args:
            source (dataset/collection): The dataset or collection to delete from
        """

        source.update()
        for item in source:
            self.client.delete(item)

    def update_meta_data(self, dataset, filename, meta_data):
        """
        Updates metadata for a file

        Args:
            dataset (package): Which dataset
            filename (str): Which file
            meta_data (dict): Dictionary of meta data
        """

        filename = os.path.basename(filename)
        package = self.get_package(dataset, filename)
        if package is not None:
            self._update_meta_data(package, meta_data)


    def get_meta_data(self, dataset, filename):
        """
        Returns metadata for a file

        Args:
            dataset (package): Which dataset
            filename (str): Which file
        """

        meta_data = {}
        filename = os.path.basename(filename)
        package = self.get_package(dataset, filename)
        if package is not None:
            meta_list = package.properties
            for m in meta_list:
                meta_data[m.key] = m.value

        return meta_data

    def delete_meta_data(self, dataset, filename, keys=None):
        """
        Deletes specified keys in meta data for source/filename.

        Args:
            dataset (package): Which dataset
            filename (str): Which file
            keys (list of str, optional): Deletes specified keys, deletes
            all meta data if None
        """

        filename = os.path.basename(filename)
        package = self.get_package(dataset, filename)
        if package is not None:
            if keys is None:
                for p in package.properties:
                    package.remove_property(p.key, category='simcore')
            else:
                for k in keys:
                    package.remove_property(k, category='simcore')

    def search(self, what, max_count):
        """
        Searches for a term in the database. Returns max_count results

        Args:
            what (str): query
            max_count (int): Max number of results to return
        """
        return self.client.search(what, max_count)
Example #14
        if outdir == '.': outdir = os.getcwd()
    elif opt in ('-l'):
        for ds in dsets: printf("%s\n", ds[0])
        sys.exit()
    elif opt in ('-c'):
        CATEGORY = True
        cat = arg
        if cat not in categories:
            printf("Category, %s, does NOT exist.\n", arg)
            sys.exit()
    elif opt in ('-d'):
        DATASET = True
        try:
            dsname = arg
            if dsname.lower() != "all":
                dset = bf.get_dataset(dsdict[dsname])
            else:
                dset = "all"
        except:
            printf("Dataset, %s, does NOT exist.\n", arg)
            sys.exit()
    elif opt == '-q':
        QUICKSYNC = True
#################
# start the sync
#################
if DATASET and dsname != "all":
    printf("Gathering Collections from %s ...\n",dset.name)
    collections = list()
    dslist = get_collections(dset,collections)
    dspaths,relpaths = create_paths(dsname, outdir, dslist)
Example #15
import click
import sparc_dash
from blackfynn import Blackfynn

bf = Blackfynn('sparc-consortium')
ds = bf.get_dataset('SPARC Datasets')


@click.group()
def cli():
    pass


@click.command()
def clear():
    print('Clearing')
    out = sparc_dash.clearRecords(ds)


@click.command()
def create_models():
    """Example script."""
    out = sparc_dash.create_models(ds)


@click.command()
def update():
    """Example script."""
    out = sparc_dash.update(bf, ds)
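The excerpt stops before the commands are registered with the group; a likely continuation, following the standard click pattern, would be:

# Probable continuation (not shown in the excerpt): register the commands and
# expose an entry point.
cli.add_command(clear)
cli.add_command(create_models)
cli.add_command(update)

if __name__ == '__main__':
    cli()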