Example #1
0
def i_delete_one_of_the_chunks(step):
    """Delete one randomly chosen chunk of the uploaded manifest from remote storage."""
    manifest = release_local_data()
    remote_dir, manifest_name = os.path.split(world.manifest_url)

    # Pick a random file entry, then a random chunk triple from it.
    random_file = choice(manifest["files"])
    chunk, md5sum, size = choice(random_file["chunks"])

    url = os.path.join(remote_dir, chunk)
    # Remember which chunk was removed so later steps can assert on it.
    world.deleted_chunk = url
    LOG.debug("Lettuce deleting %s" % url)
    world.driver.delete(url)
Example #2
0
def i_delete_one_of_the_chunks(step):
    """Lettuce step: remove one random chunk of the uploaded data from remote storage.

    Stores the deleted chunk's URL in ``world.deleted_chunk`` so later
    steps can verify behavior with a missing chunk.
    """
    manifest = release_local_data()
    remote_dir, manifest_name = os.path.split(world.manifest_url)

    # Random file entry, then a random (name, md5, size) chunk triple from it.
    chunk, md5sum, size = choice(choice(manifest["files"])["chunks"])

    chunk_url = os.path.join(remote_dir, chunk)
    world.deleted_chunk = chunk_url
    LOG.debug("Lettuce deleting %s" % chunk_url)

    world.driver.delete(chunk_url)
Example #3
0
def i_replace_the_manifest_with_old_repr(step):
    """Lettuce step: replace the remote JSON manifest with the legacy INI representation.

    Writes the converted manifest to ``<world.basedir>/manifest.ini``,
    deletes the JSON manifest from storage, uploads the INI file in its
    place, and updates ``world.manifest_url`` to the new location.
    """
    manifest = release_local_data()

    manifest_ini_path = os.path.join(world.basedir, "manifest.ini")
    with open(manifest_ini_path, 'w') as fd:
        convert_manifest(manifest).write(fd)

    # Remove the JSON manifest before uploading its INI replacement.
    world.driver.delete(world.manifest_url)

    # The trailing '' keeps the put() destination an explicit directory path.
    world.manifest_url = world.driver.put(manifest_ini_path,
            os.path.join(os.path.dirname(world.manifest_url), ''))
    LOG.debug("NEW %s" % world.manifest_url)
Example #4
0
def i_replace_the_manifest_with_old_repr(step):
    """Swap the remote JSON manifest for its legacy INI equivalent."""
    manifest = release_local_data()

    # Trailing '' makes the upload destination an explicit directory path.
    remote_dir = os.path.join(os.path.dirname(world.manifest_url), '')

    ini_path = os.path.join(world.basedir, "manifest.ini")
    with open(ini_path, 'w') as ini_file:
        convert_manifest(manifest).write(ini_file)

    # Delete the JSON manifest, then upload the INI file in its place.
    world.driver.delete(world.manifest_url)
    world.manifest_url = world.driver.put(ini_path, remote_dir)
    LOG.debug("NEW %s" % world.manifest_url)
Example #5
0
def i_upload_stream_with_gzipping(context, gzip):
    """Behave step: upload the prepared source streams, optionally gzipped.

    :param context: behave context carrying ``sources`` and ``remote_dir``;
        receives ``tr``, ``manifest``, ``gzip`` and, on failure, ``error``.
    :param gzip: flag from the feature file, arrives as the string
        'True'/'False' and is coerced to a real bool here.
    """
    # BUG FIX: the original read `gzip == 'True'` — a comparison whose result
    # was discarded, so the raw string was passed to Upload. The intent was
    # clearly to coerce the feature-file string flag to a bool.
    gzip = gzip == 'True'
    src = [cloudfs.NamedStream(stream.src, os.path.basename(stream.src.name)) for stream in context.sources]
    for s in src:
        LOG.debug(s.name)
    time.sleep(1)
    # Upload accepts either a single stream or a list of streams.
    if len(src) == 1:
        src = src[0]
    context.tr = largetransfer.Upload(src, context.remote_dir, gzip=gzip, chunk_size=5)
    context.tr.apply_async()
    try:
        context.tr.join()
    except Exception:
        # Record the failure for later assertion steps instead of aborting
        # the scenario (was a bare `except:`, which also trapped SystemExit).
        context.error = sys.exc_info()
    context.manifest = context.tr.manifest
    # NOTE(review): set unconditionally to True in the original even when the
    # flag is False — later steps may rely on this; confirm before changing.
    context.gzip = True
Example #6
0
def convert_manifest(json_manifest):
    """Convert a single-file gzip JSON manifest dict into the legacy INI form.

    Returns a ConfigParser with a 'snapshot' section (description,
    created_at, pack_method) and a 'chunks' section mapping chunk
    name -> md5sum. Only single-file, gzip-compressed manifests are
    supported (enforced by the asserts below).
    """
    assert len(json_manifest["files"]) == 1
    assert json_manifest["files"][0]["compressor"] == "gzip"

    parser = ConfigParser()
    parser.add_section("snapshot")
    parser.add_section("chunks")

    parser.set("snapshot", "description", json_manifest["description"])
    parser.set("snapshot", "created_at", json_manifest["created_at"])
    parser.set("snapshot", "pack_method", json_manifest["files"][0]["compressor"])

    # reversed(): presumably the legacy format lists chunks in the opposite
    # order from the JSON manifest — TODO confirm against old manifests.
    # NOTE(review): ConfigParser lowercases option names by default; chunk
    # names containing uppercase characters would be mangled — verify chunk
    # names are always lowercase.
    for chunk, md5sum, size in reversed(json_manifest["files"][0]["chunks"]):
        parser.set("chunks", chunk, md5sum)

    LOG.debug("CONVERT: %s", parser.items("chunks"))
    return parser
Example #7
0
def convert_manifest(json_manifest):
    """Build the legacy ConfigParser (INI) representation of *json_manifest*.

    Only single-file, gzip-compressed manifests are accepted; the result
    has a 'snapshot' metadata section and a 'chunks' section mapping chunk
    name -> md5sum.
    """
    files = json_manifest["files"]
    assert len(files) == 1
    single = files[0]
    assert single["compressor"] == "gzip"

    ini = ConfigParser()
    for section in ("snapshot", "chunks"):
        ini.add_section(section)

    ini.set("snapshot", "description", json_manifest["description"])
    ini.set("snapshot", "created_at", json_manifest["created_at"])
    ini.set("snapshot", "pack_method", single["compressor"])

    # Legacy layout lists chunks in the opposite order from the JSON manifest.
    for chunk, md5sum, _size in reversed(single["chunks"]):
        ini.set("chunks", chunk, md5sum)

    LOG.debug("CONVERT: %s", ini.items("chunks"))
    return ini
Example #8
0
def make_file(base_dir, name=None, size=None):
    """Create a file filled with *size* MiB of random data under *base_dir*.

    :param base_dir: directory to place the file in; created if missing.
    :param name: file name; a random uuid4 hex string when omitted.
    :param size: size in MiB; random in [2, 20] when omitted.
    :return: tuple of (file_path, size).
    """
    name = name or uuid.uuid4().hex
    file_path = os.path.join(base_dir, name)
    if size is None:
        size = random.randint(2, 20)
    if not os.path.exists(base_dir):
        os.mkdir(base_dir)
    # FIX: the original passed stdout=open('/dev/null', 'w') and leaked the
    # handle; the with-block closes it, and os.devnull is portable.
    with open(os.devnull, 'w') as devnull:
        subprocess.call([
                "dd",
                "if=/dev/urandom",
                "of=%s" % file_path,
                "bs=1M",
                "count=%s" % size
        ], stdout=devnull, stderr=subprocess.STDOUT, close_fds=True)
    LOG.debug('Make file %s, %s' % (file_path, size))
    assert os.path.exists(file_path), file_path
    return file_path, size
Example #9
0

import os
import shutil
import tempfile
import mock
import logging

import boto
import swiftclient

from scalarizr.storage2 import cloudfs
from scalarizr.storage2.cloudfs import s3, gcs, swift, local
from scalarizr.storage2.cloudfs import LOG

LOG.setLevel(logging.DEBUG)
LOG.addHandler(logging.FileHandler("transfer_test.log", 'w'))


import httplib2
from apiclient.discovery import build
from oauth2client.client import SignedJwtAssertionCredentials


def get_s3_conn():
    """Open an S3 connection using the EC2_* credentials from the environment."""
    return boto.connect_s3(
            host='s3.amazonaws.com',
            aws_access_key_id=os.environ['EC2_ACCESS_KEY'],
            aws_secret_access_key=os.environ['EC2_SECRET_KEY'])
Example #10
0
                "http://folsom.enter.it:5000/v2.0",
                os.environ["ENTER_IT_USERNAME"], os.environ["ENTER_IT_API_KEY"], auth_version="2")


@after.each_feature
@this_feature_only
def teardown_feature(feat):
    """Restore every attribute saved into _RESTORE during the feature run."""
    for saved in _RESTORE:
        # Each entry is a (target, attr_name, value) tuple for setattr.
        setattr(*saved)


#
# Logging
#

# Mirror all transfer debug output into a local file for post-run inspection;
# 'w' truncates the log at the start of every test session.
LOG.setLevel(logging.DEBUG)
LOG.addHandler(logging.FileHandler("transfer_test.log", 'w'))


#
#
#


STORAGES = {
        "s3": {
                "url": "s3://vova-new/vova_test",
                "driver": s3.S3FileSystem,
        },
        "gcs": {
                "url": "gcs://vova-test",