Example #1
def auth_required():
    """Check if user is authed and exit if not"""
    if not authed():
        log('You must be logged in to perform that action.\n'
            'Use \'tensorci login\' if you already have an account, or visit '
            'https://tensorci.com to create a new account.')
        exit(1)
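
Later examples (#11, #12, and #14) call this guard at the top of each command body. A minimal usage sketch, assuming a hypothetical 'status' command built with click:

import click

@click.command()
def status():
  """Hypothetical command that requires an authed session."""
  auth_required()  # exits with code 1 and a login hint if the user is not authed
  log('Session is valid.')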
Example #2
def dash():
  """
  Open TensorCI dashboard for this project.

  Ex: tensorci dash
  """
  # Find this git project's remote url from inside .git/config
  git_repo = gitconfig.get_remote_url()

  # Parse out team and repo from remote git_url
  path_match = re.match(r'/(.*)\.git', urlparse(git_repo).path)
  team = None
  repo = None

  if path_match:
    path = path_match.groups()[0]
    team, repo = path.split('/')

  if not team or not repo:
    log('Error parsing remote git repo: {}'.format(git_repo))
    return

  # Build dashboard url from TensorCI Dashboard url, the team, and the repo.
  url = '{}/{}/{}'.format(config.DASH_URL, team.lower(), repo.lower())

  try:
    # Open dashboard url in new tab of default browser
    url_helper.open_url(url)
  except BaseException:
    log('Failed to open URL: {}'.format(url))
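
A quick sketch of the team/repo parsing step above, assuming an HTTPS remote and the Python 3 urllib import (values are illustrative):

import re
from urllib.parse import urlparse

remote = 'https://github.com/my-team/my-repo.git'
path_match = re.match(r'/(.*)\.git', urlparse(remote).path)
team, repo = path_match.groups()[0].split('/')
print(team, repo)  # my-team my-repo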
Example #3
    def is_valid(self):
        """
    Validate each of the config file's key-val pairs

    :return: Whether the config file is valid or not
    :rtype: bool
    """
        # File has to exist in order to be valid...
        if not os.path.exists(self.path):
            log('A {} config file must exist before running this command.\n'.
                format(self.FILE_NAME) +
                'Run \'tensorci init\' to initialize your project and create this file.'
                )
            return False

        # Find which keys are invalid
        invalid_keys = []
        for k, v in self.config.items():
            if not v.is_valid():
                invalid_keys.append(k)

        # Tell the user which keys were invalid
        if invalid_keys:
            log('Invalid config keys: {}'.format(', '.join(invalid_keys)))

        return len(invalid_keys) == 0
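
The loop above assumes each config value exposes an is_valid() method; a minimal sketch of that assumption (ConfigKey here is hypothetical, not the CLI's actual class):

class ConfigKey:
    """Hypothetical value wrapper with the is_valid() interface used above."""

    def __init__(self, value, required=True):
        self.value = value
        self.required = required

    def is_valid(self):
        # Valid if a value is present, or if the key was optional to begin with.
        return bool(self.value) or not self.required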
Example #4
    def log_error(self):
        """
    Log the error parsed from the JSON body.

    If a 'log' key was found in the body, it's assumed that this is
    the specified error message that should be shown to the user.

    Otherwise, an error message is constructed from the 'error' key,
    the response's status code, and custom error 'code' key.
    """
        if self.ok:
            return

        # Log the provided 'log' message if it exists.
        provided_err_log_msg = self.json.get('log')

        if provided_err_log_msg:
            log(provided_err_log_msg)
            return

        # If log message not provided, construct an error message.
        err_msg = 'Request failed'
        error = self.json.get('error')
        code = self.json.get('code')

        if error:
            err_msg += ' with error: {}'.format(error)

        if code:
            err_msg += '; code={}'.format(code)

        err_msg += '; status={}'.format(self.status)

        log(err_msg)
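
For illustration, with a hypothetical body and status code, the fallback branch would produce:

# self.json = {'error': 'repo not found', 'code': 1201}, self.status = 404
# --> 'Request failed with error: repo not found; code=1201; status=404'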
Example #5
def create_model_save_path(path, download_ext):
  """
  Create path to save a model to.
  Validate the provided and desired extensions are compatible.

  :param str path: Desired model path (either filename or directory)
  :param str download_ext: File extension of downloaded model content
  :return: Path to save model to.
  :rtype: str
  """
  archive = 'zip'  # All model folders should be downloaded as zip files.

  # Check if the specified model path is a directory
  is_dir = path.endswith('/') or ('.' not in path.split('/').pop())

  # Ensure downloaded file is a zip file if model path is a directory
  if is_dir:
    if download_ext != archive:
      log('Can\'t download model -- Specified model path was a directory, '
          'but the downloaded model is not a {} file.'.format(archive))
      exit(1)

    save_to = '.'.join((path.rstrip('/'), archive))
  else:
    ext = path.split('/').pop().split('.').pop()

    # If model is a file (not a dir), ensure its ext matches the downloaded file ext.
    if ext != download_ext:
      log('Can\'t download model -- Expected type: {}; Actual type: {}'.format(ext, download_ext))
      exit(1)

    save_to = path

  return save_to
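
Both branches, sketched with made-up paths:

create_model_save_path('models/latest/', 'zip')  # directory          -> 'models/latest.zip'
create_model_save_path('models/net.h5', 'h5')    # matching file ext  -> 'models/net.h5'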
Example #6
def version():
    """
  Show the current CLI version.

  Ex: tensorci version
  """
    log(v)
Example #7
def logout():
  """
  Logout of TensorCI.

  Ex: tensorci logout
  """
  # Remove authed session from netrc
  auth.delete()
  log('Successfully logged out.')
Example #8
def login(username, password):
    """
  Login as a TensorCI user.

  If --username (-u) and --password (-p) are not provided as options, the user
  will be prompted for these values.

  Ex: tensorci login
  """
    log('Enter your TensorCI credentials:')

    # Prompt user for username unless already provided
    username = (username or click.prompt('GitHub Username')).strip()

    # Can't proceed without username :/
    if not username:
        log('GitHub username is required.')
        return

    # Prompt user for password unless already provided
    pw = (password or click.prompt('CLI Password', hide_input=True)).strip()

    # Can't proceed without pw :/
    if not pw:
        log('CLI password is required.')
        return

    # Construct API payload
    payload = {
        'username': username,
        'password': pw,
        'provider': 'github'  # hard-coding for now since only supporting GH
    }

    try:
        resp = api.post('/provider_user/login',
                        payload=payload,
                        log_on_error=False,
                        exit_on_error=False)
    except KeyboardInterrupt:
        return

    # Log the error if the login failed.
    if not resp.ok:
        log('Authentication failed.')
        return

    # Get the user_token provided in the response headers
    user_token = resp.headers.get(auth_header_name)

    # Create a new authed session in netrc with the user_token as the password
    auth.create(password=user_token)

    log('Logged in as {}.'.format(username))
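
An illustrative session, assuming the login succeeds (the username is made up; the password prompt hides input):

$ tensorci login -u my-github-user
Enter your TensorCI credentials:
CLI Password:
Logged in as my-github-user.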
Example #9
    def callback(monitor):
        # If upload has completed...
        if monitor.bytes_read == encoder.len:
            # Hack around that this callback is called twice when completed.
            if not hasattr(monitor, 'finished'):
                setattr(monitor, 'finished', True)

                # Show upload progress.
                bar.show(monitor.bytes_read)

                # Display completion log if provided.
                if completion_log:
                    log(completion_log)
        else:
            # Show upload progress.
            bar.show(monitor.bytes_read)
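
This callback is wired into an upload roughly as in Example #12; the progress bar object referenced above is assumed to expose a show(bytes_read) method:

from requests_toolbelt import MultipartEncoder, MultipartEncoderMonitor

encoder = MultipartEncoder(fields={'file': ('data.json', open('data.json', 'rb'), 'application/json')})
callback = create_callback(encoder, completion_log='\nUpload complete.')  # builds the callback above
monitor = MultipartEncoderMonitor(encoder, callback)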
Example #10
    def log_stream(self, chunk_size=10, lines_to_ignore=(tci_keep_alive,)):
        """
    Log the streaming response by parsing and iterating over lines of the response.

    :param int chunk_size: Chunk size to parse response with (default=10)
    :param tuple(str) lines_to_ignore: Tuple of log messages to ignore.
    """
        if not self.stream:
            return

        try:
            for line in self.response_obj.iter_lines(chunk_size=chunk_size):
                if not line or line in lines_to_ignore:
                    continue

                log(line)
        except KeyboardInterrupt:
            exit(0)
        except BaseException as e:
            log('Error while parsing logs: {}'.format(e))
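
Typical use, mirroring Example #11: make a streaming request and hand the response to log_stream():

git_repo = gitconfig.get_remote_url()
resp = api.get('/deployment/logs', payload={'git_url': git_repo, 'follow': 'true'}, stream=True)
resp.log_stream()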
Example #11
def logs(follow):
    """
  Show logs from the latest training session.

  Includes logs from preprocessing, training, and testing steps.

  If the --follow (-f) option is provided, the logs will be streamed
  and followed in real-time.

  Ex: tensorci logs -f
  """
    # Must already be logged in to perform this command.
    auth_required()

    # Find this git project's remote url from inside .git/config
    git_repo = gitconfig.get_remote_url()

    # Build the payload.
    payload = {
        'git_url': git_repo,
        'follow': str(follow).lower()  # 'true' or 'false' --> will be converted into a query param anyway
    }

    try:
        # Get the logs for this deployment.
        resp = api.get('/deployment/logs', payload=payload, stream=follow)
    except KeyboardInterrupt:
        return

    if follow:
        # Streaming log response
        resp.log_stream()
    else:
        # JSON dump of logs
        logs = resp.json.get('logs')

        if logs:
            log('\n'.join(logs))
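
Illustrative invocations:

$ tensorci logs       # dump logs from the latest training session
$ tensorci logs -f    # stream and follow the logs in real time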
Example #12
def dataset(name, file):
    """
  Create a TensorCI dataset from a JSON file.
  The dataset will be associated with the TensorCI project of the current working directory.
  The dataset's name will default to the name of the project if not specified.

  Ex: tensorci create dataset -f mydataset.json
  """
    # Must already be logged in to perform this command.
    auth_required()

    # Find this git project's remote url from inside .git/config
    git_repo = gitconfig.get_remote_url()

    # If dataset name was specified, slugify it.
    if name:
        dataset_slug = to_slug(name)
    else:
        # Default dataset slug will just be the repo name.
        dataset_slug = None

    # Make sure file actually exists.
    if not os.path.exists(file):
        log('No file found at path {}'.format(file))
        return

    # Require dataset file to be JSON.
    if not file.endswith('.json'):
        log('Dataset must be a JSON file (i.e. dataset.json)')
        return

    # Build the payload.
    payload = {
        'git_url': git_repo,
        'dataset_slug': dataset_slug,
        'file': ('dataset.json', open(file, 'rb'), 'application/json')
    }

    # Create a multipart encoder.
    encoder = MultipartEncoder(fields=payload)

    # Create progress callback, specifying message to show when upload completes.
    callback = create_callback(
        encoder, completion_log='\nConverting dataset to database...')

    # Create a monitor for the encoder so we can track upload progress.
    monitor = MultipartEncoderMonitor(encoder, callback)

    headers = {'Content-Type': monitor.content_type}

    try:
        # Upload the dataset.
        api.post('/dataset', headers=headers, mp_upload_monitor=monitor)
    except KeyboardInterrupt:
        return

    log('Successfully created dataset.')
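
The 'file' field follows the requests_toolbelt (filename, file object, content type) tuple convention; a standalone sketch with made-up values:

from requests_toolbelt import MultipartEncoder

encoder = MultipartEncoder(fields={
    'git_url': 'https://github.com/my-team/my-repo.git',
    'dataset_slug': 'my-dataset',
    'file': ('dataset.json', open('mydataset.json', 'rb'), 'application/json')
})
print(encoder.content_type)  # multipart/form-data; boundary=...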
Example #13
    def make_request(self,
                     method,
                     route,
                     payload=None,
                     headers=None,
                     stream=False,
                     mp_upload_monitor=None,
                     log_on_error=True,
                     exit_on_error=True):
        """
    Actually perform the API call.

    :param str method:
      API method to perform. Valid options: 'get', 'post', 'put', 'delete'.
    :param str route:
      API route to hit on top of self.base_url
    :param dict payload:
      Payload to provide with request. For GET and DELETE requests, these are converted into query params.
    :param dict headers:
      Request-specific headers. Will overwrite any self.base_headers with the same key.
    :param bool stream:
      Whether to make a streaming request
    :param mp_upload_monitor:
      Multipart encoder monitor for form-uploaded data
      :type: requests_toolbelt.multipart.encoder.MultipartEncoderMonitor
    :param bool log_on_error:
      Whether to log an error message if the request fails
    :param bool exit_on_error:
      Whether to exit if the request fails
    :return: an API response object
    :rtype: AbstractApiResponse
    """
        # Get the proper method to call on the 'requests' object (get, post, put, or delete).
        request = getattr(requests, method)

        # Build up kwargs to pass to the requests method call.
        request_kwargs = {
            'headers': self.build_request_headers(headers=headers),
            'stream': stream
        }

        if method in ('get', 'delete'):
            # Set the payload as query params for GET and DELETE requests
            request_kwargs['params'] = payload or {}
        elif mp_upload_monitor:
            # If multipart encoder monitor is provided, assign that to the data kwarg
            request_kwargs['data'] = mp_upload_monitor
        else:
            # Otherwise, just set the payload as json
            request_kwargs['json'] = payload or {}

        try:
            # Make the request
            response = request(self.base_url + route, **request_kwargs)
        except KeyboardInterrupt:
            # Allow the user to kill it if taking too long
            exit(0)
        except BaseException as e:
            log('Unknown Error while making request: {}'.format(e))
            exit(1)

        # Create an abstract api response
        api_resp = AbstractApiResponse(response,
                                       stream=stream,
                                       mp_upload=bool(mp_upload_monitor),
                                       log_on_error=log_on_error,
                                       exit_on_error=exit_on_error)

        return api_resp
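
The api.get and api.post calls seen in the other examples presumably delegate to this method; a minimal sketch of that assumption:

    def get(self, route, **kwargs):
        return self.make_request('get', route, **kwargs)

    def post(self, route, **kwargs):
        return self.make_request('post', route, **kwargs)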
Example #14
def model(output):
    """
  Fetch the latest trained model.

  If the --output (-o) option is provided, the model will be saved to
  that specified file path. Otherwise, it will be saved to the value of
  the 'model' key provided in .tensorci.yml.

  Ex: tensorci get model
  """
    # Must already be logged in to perform this command.
    auth_required()

    # Find this git project's remote url from inside .git/config
    git_repo = gitconfig.get_remote_url()

    # Build the payload.
    payload = {'git_url': git_repo}

    try:
        # Download the model file.
        resp = api.get('/repo/model', payload=payload, stream=True)
    except KeyboardInterrupt:
        return

    # If output file was specified, create an absolute path from that.
    if output:
        if output.startswith('/'):  # already abs path
            model_path = output
        else:  # create abs path from cwd
            model_path = os.path.join(os.getcwd(), output)
    else:
        # Load our config file.
        config = ConfigFile().load()

        # Get the model file's absolute path based on cwd.
        model_path = config.abs_model_path()

    # Get the path where we should save the model.
    model_ext = resp.headers.get('Model-File-Type')
    save_to = create_model_save_path(model_path, model_ext)

    # Ensure all parent dirs of 'save_to' file path exist before saving the model.
    upsert_parent_dirs(save_to)

    # Stream model contents to file.
    try:
        total_file_bytes = int(resp.headers.get('Content-Length'))

        # Set up progress bar buffer that will monitor the download while also writing to our desired file.
        dl_stream = ProgressDownloadStream(stream=resp.response_obj,
                                           expected_size=total_file_bytes)
        dl_stream.stream_to_file(save_to)
    except KeyboardInterrupt:
        return
    except BaseException as e:
        log('\nError streaming model file to path {} with error: {}'.format(
            save_to, e))
        return

    # Unzip model file if it's an archive.
    if save_to.endswith('.zip'):
        saved_to = extract_in_place(save_to)
    else:
        saved_to = save_to

    # Get the relative path to the saved model for display purposes.
    relative_saved_to = saved_to.replace('{}/'.format(os.getcwd()), '')

    log('\nSaved model to {}'.format(relative_saved_to))
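
Illustrative invocations (the output path is made up):

$ tensorci get model                      # save to the 'model' path from .tensorci.yml
$ tensorci get model -o weights/model.h5  # save to an explicit output path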