def publish(self):
    """Publish the uploaded data to the selected dataset.

    A dataset can be published just once. If the publish request fails,
    the previously uploaded data is cancelled and the method returns
    immediately without polling the publication status.

    Returns:
        response: Response from the Intelligence Server acknowledging the
            publication process.
    """
    response = datasets.publish(connection=self._connection,
                                dataset_id=self._dataset_id,
                                session_id=self._session_id)

    if not response.ok:
        # On error, cancel the previously uploaded data and stop here:
        # polling the status of a failed publication would loop forever.
        datasets.publish_cancel(connection=self._connection,
                                dataset_id=self._dataset_id,
                                session_id=self._session_id)
        return response

    status = 6  # default initial status; the server reports 1 on completion
    while status != 1:
        pub = datasets.publish_status(connection=self._connection,
                                      dataset_id=self._dataset_id,
                                      session_id=self._session_id)
        status = pub.json()['status']

    print("Dataset '%s' published successfully." % self._name)
    return response
def create(self, folder_id=None, auto_upload=True, chunksize=100000,
           progress_bar=True, verbose=False):
    """Creates a new dataset.

    Args:
        folder_id (str, optional): ID of the shared folder that the dataset
            should be created within. If `None`, defaults to the user's
            My Reports folder.
        auto_upload: If True, automatically uploads the data used to create
            the dataset definition to the dataset. If False, simply creates
            the dataset but does not upload data to it.
        chunksize (int, optional): Number of rows to transmit to the server
            with each request when uploading.
        progress_bar (bool, optional): If True (default), show the upload
            progress bar.
        verbose: If True, prints status information about the dataset upload.
    """
    self._folder_id = folder_id if folder_id is not None else ""

    # generate model of the dataset
    self.__build_model()

    # makes request to create the dataset
    response = datasets.create_multitable_dataset(connection=self._connection,
                                                  body=self.__model)

    if not response.ok:
        self.__response_handler(response=response,
                                msg="Error creating new dataset model.")
        return

    self._dataset_id = response.json()['id']

    if verbose:
        print("Created dataset '{}' with ID: '{}'.".format(self._name,
                                                           self._dataset_id))

    # if desired, automatically upload and publish the data to the new dataset
    if auto_upload:
        self.update(chunksize=chunksize, progress_bar=progress_bar)
        self.publish()

        status = 6  # default initial status; the server reports 1 on completion
        while status != 1:
            pub = datasets.publish_status(connection=self._connection,
                                          dataset_id=self._dataset_id,
                                          session_id=self._session_id)
            if not pub.ok:
                self.__response_handler(response=pub,
                                        msg="Error publishing the dataset.")
                break
            status = pub.json()['status']
def publish_status(self):
    """Check the status of data that was uploaded to a dataset.

    Returns:
        status: The status of the publication process as a dictionary. In
            the 'status' key, "1" denotes completion.
    """
    return datasets.publish_status(connection=self._connection,
                                   dataset_id=self._dataset_id,
                                   session_id=self._session_id).json()
def upload_status(self, connection, dataset_id, session_id):
    """Check the status of data that was uploaded to a dataset.

    Args:
        connection: MicroStrategy connection object returned by
            `microstrategy.Connection()`.
        dataset_id (str): Identifier of a pre-existing dataset.
        session_id (str): Identifier of the server session used for
            collecting uploaded data.
    """
    status_msg = "Publication status for dataset with ID: '{}':".format(dataset_id)
    response = datasets.publish_status(connection=connection,
                                       dataset_id=dataset_id,
                                       session_id=session_id)
    response_handler(response=response, msg=status_msg, throw_error=False)
def publish_status(self):
    """Check the status of data that was uploaded to a super cube.

    Returns:
        status: The status of the publication process as a dictionary. In
            the 'status' key, "1" denotes completion.

    Raises:
        AttributeError: If no upload session has been created yet.
    """
    # Guard: polling a publication requires an active upload session.
    if not self._session_id:
        raise AttributeError("No upload session created.")
    response = datasets.publish_status(connection=self._connection,
                                       id=self._id,
                                       session_id=self._session_id)
    return response.json()
def upload_status(self, connection, id, session_id):
    """Check the status of data that was uploaded to a super cube.

    Args:
        connection: MicroStrategy connection object returned by
            `connection.Connection()`.
        id (str): Identifier of a pre-existing super cube.
        session_id (str): Identifier of the server session used for
            collecting uploaded data.
    """
    # TODO: near-duplicate of the dataset-level upload_status helper —
    # consider consolidating the two.
    response = datasets.publish_status(connection=connection, id=id,
                                       session_id=session_id)
    status_msg = "Publication status for super cube with ID: '{}':".format(id)
    helper.response_handler(response=response, msg=status_msg,
                            throw_error=False)