Code Example #1
File: push.py Project: tamasgal/sregistry-cli
def push(self, path, name, tag=None):
    '''push an image to your Storage. If the collection doesn't exist,
       it is created.
   
       Parameters
       ==========
       path: should correspond to an absolute image path (or derive it)
       name: should be the complete uri that the user has requested to push.
       tag: should correspond with an image tag. This is provided to mirror Docker

    '''
    path = os.path.abspath(path)
    bot.debug("PUSH %s" % path)

    if not os.path.exists(path):
        bot.error('%s does not exist.' % path)
        sys.exit(1)

    # Parse image names
    names = parse_image_name(remove_uri(name), tag=tag)

    # Get the size of the file
    file_size = os.path.getsize(path)
    chunk_size = 4 * 1024 * 1024
    storage_path = "/%s" % names['storage']

    # Create / get the collection
    collection = self._get_or_create_collection(names['collection'])

    # The image name is the name followed by tag
    image_name = os.path.basename(names['storage'])

    # prepare the progress bar
    progress = 0
    bot.show_progress(progress, file_size, length=35)

    # Put the (actual) container into the collection
    with open(path, 'rb') as F:
        self.conn.put_object(names['collection'],
                             image_name,
                             contents=F.read(),
                             content_type='application/octet-stream')

    # Finish up
    bot.show_progress(iteration=file_size,
                      total=file_size,
                      length=35,
                      carriage_return=True)

    # Newline to finish the progress output
    sys.stdout.write('\n')
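
A hedged sketch of how the snippet above consumes the parsed name; only the keys actually referenced in the code are shown, and the URI below is a placeholder rather than a value from the project:

# Illustrative only: the placeholder URI and the exact layout of the returned
# dict depend on the project's parse_image_name.
names = parse_image_name(remove_uri('hub.example.com/mycollection/myimage'), tag='latest')

collection_name = names['collection']            # collection to get or create
image_name = os.path.basename(names['storage'])  # object name within that collection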
Code Example #2
def stream(url, headers, stream_to=None, retry=True):
    """stream is a get that will stream to file_name. Since this is a worker
       task, it differs from the client provided version in that it requires
       headers.
    """

    bot.debug("GET %s" % url)

    if DISABLE_SSL_CHECK is True:
        bot.warning("Verify of certificates disabled! ::TESTING USE ONLY::")

    # Ensure headers are present, update if not
    response = requests.get(url,
                            headers=headers,
                            verify=not DISABLE_SSL_CHECK,
                            stream=True)

    # Deal with token if necessary
    if response.status_code == 401 and retry is True:
        headers = update_token(response, headers)
        return stream(url, headers, stream_to, retry=False)

    if response.status_code == 200:

        # Keep user updated with Progress Bar
        content_size = None
        if "Content-Length" in response.headers:
            progress = 0
            content_size = int(response.headers["Content-Length"])
            bot.show_progress(progress, content_size, length=35)

        chunk_size = 1 << 20
        with open(stream_to, "wb") as filey:
            for chunk in response.iter_content(chunk_size=chunk_size):
                filey.write(chunk)
                if content_size is not None:
                    progress += chunk_size
                    bot.show_progress(
                        iteration=progress,
                        total=content_size,
                        length=35,
                        carriage_return=False,
                    )

        # Newline to finish download
        sys.stdout.write("\n")

        return stream_to

    bot.exit("Problem with stream, response %s" % response.status_code)
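
A minimal usage sketch for the worker-level stream() above; the URL, token, and destination path are placeholders, not values from the project:

# Illustrative call only -- the url and Authorization token are placeholders.
headers = {'Authorization': 'Bearer <token>'}
saved = stream('https://registry.example.com/container.sif',
               headers=headers,
               stream_to='/tmp/container.sif')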
Code Example #3
def stream(url, headers, stream_to=None, retry=True):
    '''stream is a get that will stream to file_name. Since this is a worker
       task, it differs from the client provided version in that it requires
       headers.
    '''
    bot.debug("GET %s" % url)

    if DISABLE_SSL_CHECK is True:
        bot.warning('Verify of certificates disabled! ::TESTING USE ONLY::')

    # Ensure headers are present, update if not
    response = requests.get(url,
                            headers=headers,
                            verify=not DISABLE_SSL_CHECK,
                            stream=True)

    # If we get a permissions error, try once more with an updated token
    if response.status_code in [401, 403] and retry:
        headers = update_token(headers)
        return stream(url, headers, stream_to, retry=False)

    # Successful Response
    elif response.status_code == 200:

        # Keep user updated with Progress Bar
        content_size = None
        if 'Content-Length' in response.headers:
            progress = 0
            content_size = int(response.headers['Content-Length'])
            bot.show_progress(progress, content_size, length=35)

        chunk_size = 1 << 20
        with open(stream_to, 'wb') as filey:
            for chunk in response.iter_content(chunk_size=chunk_size):
                filey.write(chunk)
                if content_size is not None:
                    progress += chunk_size
                    bot.show_progress(iteration=progress,
                                      total=content_size,
                                      length=35,
                                      carriage_return=False)

        # Newline to finish download
        sys.stdout.write('\n')

        return stream_to

    bot.error("Problem with stream, response %s" % response.status_code)
    sys.exit(1)
Code Example #4
def stream_response(self, response, stream_to=None, show_progress=True):
    """
       stream response is one level higher up than stream, starting with a 
       response object and then performing the stream without making the
       requests.get. The expectation is that the request was successful 
       (status code 20*).
       show_progress: boolean to show progress bar

       Parameters
       ==========
       response: a response that is ready to be iterated over to download in
                 streamed chunks
       stream_to: the file to stream to


    """
    if response.status_code == 200:

        if show_progress is False:
            bot.quiet = True

        # Keep user updated with Progress Bar, if not quiet
        content_size = None
        if "Content-Length" in response.headers:
            progress = 0
            content_size = int(response.headers["Content-Length"])
            bot.show_progress(progress, content_size, length=35)

        chunk_size = 1 << 20
        with open(stream_to, "wb") as filey:
            for chunk in response.iter_content(chunk_size=chunk_size):
                filey.write(chunk)
                if content_size is not None:
                    progress += chunk_size
                    bot.show_progress(
                        iteration=progress,
                        total=content_size,
                        length=35,
                        carriage_return=False,
                    )

        # Newline to finish download
        sys.stdout.write("\n")

        return stream_to

    bot.exit("Problem with stream, response %s" % (response.status_code))
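
Because stream_response() starts from an already-issued response, the caller is expected to perform the GET itself with stream=True. A sketch under that assumption (the client object and URL are illustrative, not part of the source above):

import requests

# Illustrative only: 'client' stands for any object exposing the
# stream_response() method shown above.
response = requests.get('https://example.com/container.sif', stream=True)
saved = client.stream_response(response,
                               stream_to='/tmp/container.sif',
                               show_progress=True)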
Code Example #5
File: http.py Project: vsoch/sregistry-cli
def stream_response(self, response, stream_to=None):
    '''
       stream response is one level higher up than stream, starting with a
       response object and then performing the stream without making the
       requests.get. The expectation is that the request was successful
       (status code 20*).

       Parameters
       ==========
       response: a response that is ready to be iterated over to download in
                 streamed chunks
       stream_to: the file to stream to
    '''

    if response.status_code == 200:

        # Keep user updated with Progress Bar
        content_size = None
        if 'Content-Length' in response.headers:
            progress = 0
            content_size = int(response.headers['Content-Length'])
            bot.show_progress(progress, content_size, length=35)

        chunk_size = 1 << 20
        with open(stream_to, 'wb') as filey:
            for chunk in response.iter_content(chunk_size=chunk_size):
                filey.write(chunk)
                if content_size is not None:
                    progress += chunk_size
                    bot.show_progress(iteration=progress,
                                      total=content_size,
                                      length=35,
                                      carriage_return=False)

        # Newline to finish download
        sys.stdout.write('\n')

        return stream_to

    bot.error("Problem with stream, response %s" % (response.status_code))
    sys.exit(1)
Code Example #6
    def run(self, func, tasks, func2=None):
        '''run will send a list of tasks, each a tuple
        of arguments, through a function. The arguments
        should be ordered correctly.
        :param func: the function to run with multiprocessing.pool
        :param tasks: a list of tasks, each a tuple
                      of arguments to process
        :param func2: filter function to run result
                      from func through (optional)
        '''

        # Keep track of some progress for the user
        progress = 1
        total = len(tasks)

        # if we don't have tasks, don't run
        if len(tasks) == 0:
            return

        # If two functions are run per task, double total jobs
        if func2 is not None:
            total = total * 2

        finished = []
        level1 = []
        results = []

        try:
            prefix = "[%s/%s]" % (progress, total)
            bot.show_progress(0, total, length=35, prefix=prefix)
            pool = multiprocessing.Pool(self.workers, init_worker)

            self.start()
            for task in tasks:
                result = pool.apply_async(multi_wrapper,
                                          multi_package(func, [task]))
                results.append(result)
                level1.append(result._job)

            while len(results) > 0:
                result = results.pop()
                result.wait()
                bot.show_progress(progress, total, length=35, prefix=prefix)
                progress += 1
                prefix = "[%s/%s]" % (progress, total)

                # Pass the result through a second function?
                if func2 is not None and result._job in level1:
                    result = pool.apply_async(
                        multi_wrapper, multi_package(func2,
                                                     [(result.get(), )]))
                    results.append(result)
                else:
                    finished.append(result.get())

            self.end()
            pool.close()
            pool.join()

        except (KeyboardInterrupt, SystemExit):
            bot.error("Keyboard interrupt detected, terminating workers!")
            pool.terminate()
            sys.exit(1)

        except Exception as e:
            bot.error(e)

        return finished
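
A hedged sketch of driving run(); the worker function, the URL list, and the Workers class name are assumptions for illustration only:

# Hypothetical usage -- assumes the method above lives on a Workers class
# whose constructor sets self.workers.
def download_one(url, dest):
    '''worker executed in the pool; receives one unpacked task tuple.'''
    pass

urls = ['https://example.com/a.sif', 'https://example.com/b.sif']  # placeholders
tasks = [(url, '/tmp/image-%s.sif' % i) for i, url in enumerate(urls)]
finished = Workers().run(func=download_one, tasks=tasks)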
Code Example #7
File: push.py Project: ogawa/sregistry-cli
def push(self, path, name, tag=None):
    '''push an image to your Dropbox
   
       Parameters
       ==========
       path: should correspond to an absolute image path (or derive it)
       name: should be the complete uri that the user has requested to push.
       tag: should correspond with an image tag. This is provided to mirror Docker

       If the image fits in a single chunk (chunk_size below, well under
       Dropbox's 150MB limit for files_upload), the standard file_upload is
       used. If larger, the image is uploaded in chunks with a progress bar.

    '''
    path = os.path.abspath(path)
    bot.debug("PUSH %s" % path)

    if not os.path.exists(path):
        bot.exit('%s does not exist.' % path)

    # Parse the requested image name (collection, image, tag, storage)
    names = parse_image_name(remove_uri(name), tag=tag)

    # Get the size of the file
    file_size = os.path.getsize(path)
    chunk_size = 4 * 1024 * 1024
    storage_path = "/%s" % names['storage']

    # This is MB
    # image_size = os.path.getsize(path) >> 20

    # prepare the progress bar
    progress = 0
    bot.show_progress(progress, file_size, length=35)

    # If the image fits within a single chunk, use the standard upload
    with open(path, 'rb') as F:
        if file_size <= chunk_size:
            self.dbx.files_upload(F.read(), storage_path)

        # otherwise upload in chunks
        else:

            start = self.dbx.files_upload_session_start(F.read(chunk_size))
            cursor = dropbox.files.UploadSessionCursor(
                session_id=start.session_id, offset=F.tell())
            commit = dropbox.files.CommitInfo(path=storage_path)

            while F.tell() < file_size:

                progress += chunk_size

                # Finishing up the file, less than chunk_size to go
                if file_size - F.tell() <= chunk_size:
                    self.dbx.files_upload_session_finish(
                        F.read(chunk_size), cursor, commit)

                # Otherwise, append this chunk and advance the cursor
                else:
                    self.dbx.files_upload_session_append(
                        F.read(chunk_size), cursor.session_id, cursor.offset)
                    cursor.offset = F.tell()

                # Update the progress bar
                bot.show_progress(iteration=progress,
                                  total=file_size,
                                  length=35,
                                  carriage_return=False)

    # Finish up
    bot.show_progress(iteration=file_size,
                      total=file_size,
                      length=35,
                      carriage_return=True)

    # Newline to finish the progress output
    sys.stdout.write('\n')
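
A usage sketch for the Dropbox-backed push above; the client object is hypothetical, and only files larger than the 4MB chunk_size take the upload-session path:

# Illustrative only: 'client' stands for the Dropbox client that exposes the
# push() method above through self.dbx.
client.push(path='/tmp/large-container.sif',  # larger than one chunk, so uploaded in sessions
            name='mycollection/container',
            tag='latest')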