def Main(argv=sys.argv):
    logging.basicConfig(level=logging.INFO)

    try:
        argv = FLAGS(argv)  # parse flags
    except flags.Error as e:
        logging.error('%s\nUsage: %s ARGS\n%s', e, sys.argv[0], FLAGS)
        sys.exit(1)

    if FLAGS.bucket is None:
        raise ValueError('Must specify a valid bucket for this test.')

    logging.info('Storage provider is %s, bucket is %s, scenario is %s',
                 FLAGS.storage_provider, FLAGS.bucket, FLAGS.scenario)

    # This is essentially a dictionary lookup implemented in if
    # statements, but doing it this way lets us avoid importing the
    # modules of storage providers we aren't using (see the
    # dictionary-based sketch after this example).
    if FLAGS.storage_provider == 'AZURE':
        import azure_service
        service = azure_service.AzureService()
    elif FLAGS.storage_provider == 'GCS':
        import gcs
        service = gcs.GCSService()
    elif FLAGS.storage_provider == 'S3':
        import s3
        service = s3.S3Service()
    else:
        raise ValueError('Invalid storage provider %s' %
                         FLAGS.storage_provider)

    if FLAGS.storage_provider == 'AZURE':
        # There are DNS lookup issues with Azure when issuing a "high"
        # number of concurrent requests from multiple threads. The error
        # comes from getaddrinfo() as called by the azure Python library.
        # Reducing the concurrent thread count to 10 or below mitigates
        # the issue. If we lower the thread count, we also need to lower
        # the total object count so the time to write these objects stays
        # short.
        global LIST_CONSISTENCY_THREAD_COUNT
        LIST_CONSISTENCY_THREAD_COUNT = 10
        global LIST_CONSISTENCY_OBJECT_COUNT
        LIST_CONSISTENCY_OBJECT_COUNT = 1000

    if FLAGS.scenario == 'OneByteRW':
        return OneByteRWBenchmark(service)
    elif FLAGS.scenario == 'ListConsistency':
        list_latency = {}
        list_inconsistency_window = {}
        inconsistent_list_count = {}
        for scenario in [
                LIST_AFTER_WRITE_SCENARIO, LIST_AFTER_UPDATE_SCENARIO
        ]:
            list_latency[scenario] = []
            list_inconsistency_window[scenario] = []
            inconsistent_list_count[scenario] = 0.0

        logging.info('Running list consistency tests for %d iterations...',
                     FLAGS.iterations)
        for _ in range(FLAGS.iterations):
            result = ListConsistencyBenchmark(service)
            # Analyze the result for both scenarios.
            for scenario in [
                    LIST_AFTER_WRITE_SCENARIO, LIST_AFTER_UPDATE_SCENARIO
            ]:
                result_consistent = '%s%s' % (scenario,
                                              LIST_RESULT_SUFFIX_CONSISTENT)
                if result_consistent in result:
                    if result[result_consistent]:
                        list_latency[scenario].append(
                            result['%s%s' %
                                   (scenario, LIST_RESULT_SUFFIX_LATENCY)])
                    else:
                        inconsistent_list_count[scenario] += 1
                        list_inconsistency_window[scenario].append(
                            result['%s%s' %
                                   (scenario,
                                    LIST_RESULT_SUFFIX_INCONSISTENCY_WINDOW)])

        # All iterations completed, ready to print out final stats.
        logging.info('\n\nFinal stats:')
        for scenario in [
                LIST_AFTER_WRITE_SCENARIO, LIST_AFTER_UPDATE_SCENARIO
        ]:
            logging.info(
                '%s consistency percentage: %f', scenario, 100 *
                (1 - inconsistent_list_count[scenario] / FLAGS.iterations))

            if len(list_inconsistency_window[scenario]) > 0:
                logging.info(
                    '%s inconsistency window: %s', scenario,
                    json.dumps(PercentileCalculator(
                        list_inconsistency_window[scenario]),
                               sort_keys=True))

            if len(list_latency[scenario]) > 0:
                logging.info(
                    '%s latency: %s', scenario,
                    json.dumps(PercentileCalculator(list_latency[scenario]),
                               sort_keys=True))

        return 0
    elif FLAGS.scenario == 'SingleStreamThroughput':
        return SingleStreamThroughputBenchmark(service)
    elif FLAGS.scenario == 'CleanupBucket':
        return CleanupBucket(service)
    elif FLAGS.scenario == 'MultiStreamWrite':
        return MultiStreamWrites(service)
    elif FLAGS.scenario == 'MultiStreamRead':
        return MultiStreamReads(service)
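
The lazy-import dispatch above could also be written as a literal dictionary lookup. A minimal sketch, assuming each provider module exposes the service class used above (nothing outside this snippet is confirmed by the source):

import importlib

# Map each provider flag value to (module name, service class name) so a
# provider's module is imported only once that provider is selected.
_PROVIDERS = {
    'AZURE': ('azure_service', 'AzureService'),
    'GCS': ('gcs', 'GCSService'),
    'S3': ('s3', 'S3Service'),
}

def _MakeService(provider):
    try:
        module_name, class_name = _PROVIDERS[provider]
    except KeyError:
        raise ValueError('Invalid storage provider %s' % provider)
    module = importlib.import_module(module_name)  # deferred import
    return getattr(module, class_name)()

This keeps the deferred-import behavior the comment asks for while replacing the if/elif chain with data.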
Code example #2
File: s3restore.py  Project: TaRiven/snippets
    for line in stuff.split("\n"):
        fn = getFile(md5bucketname, line[0:2] + "/" + line)
        # TODO:  Need decrypt and concatenation here


def doStuff(bucket, md5bucketname, args):
    if len(args) == 0:
        showIndex(bucket)
    elif len(args) == 1:
        showDay(bucket, args[0])
    elif len(args) == 2:
        restoreFile(bucket, md5bucketname, args[0], args[1])
    else:
        raise exceptions.Exception("Don't know what to do with your args")


if __name__ == '__main__':
    # This doesn't exist in my older httplib, but s3 needs it
    try:
        httplib.MAXAMOUNT
    except AttributeError:
        httplib.MAXAMOUNT = 8192

    idxbucket, md5bucket, s3id, s3auth = sys.argv[1:5]

    rest = sys.argv[5:]

    s3conn = s3.S3Service(s3id, s3auth)

    doStuff(idxbucket, md5bucket, rest)
Code example #3
File: report.py  Project: scotthou94/ican
    def __init__(self):
        self.s3Service = s3.S3Service()
Code example #4
File: s3sync.py  Project: ktosiu/snippets-2
        # Equivalent un-retried call: keys = bucket.keys(marker=keys[-1], delimiter='^')
        keys = __retry(bucket.keys, [], {'marker': keys[-1], 'delimiter': '^'})
        rv.union_update(sets.Set(keys))
    return rv
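
The __retry helper used above is not shown in this snippet. A minimal sketch of such a wrapper, assuming it takes the callable plus positional and keyword arguments and retries on any exception (the attempt count and delay here are made up):

import time

def __retry(f, args, kwargs, attempts=3, delay=1):
    # Call f(*args, **kwargs), retrying with a fixed delay; re-raise the
    # last exception once the attempts are exhausted.
    for attempt in range(attempts):
        try:
            return f(*args, **kwargs)
        except Exception:
            if attempt == attempts - 1:
                raise
            time.sleep(delay)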


if __name__ == '__main__':
    # This doesn't exist in my older httplib, but s3 needs it
    try:
        httplib.MAXAMOUNT
    except AttributeError:
        httplib.MAXAMOUNT = 8192

    top, bucket, s3id, s3auth = sys.argv[1:]

    c = s3.S3Service(s3id, s3auth)
    bucket = c[bucket]
    assert bucket is not None
    remote = getAllBucketContents(bucket)
    print "Found %d keys" % (len(remote), )
    os.chdir(top)

    deleted = 0

    signal.signal(signal.SIGALRM, sighandler)

    added, local = doAdditions(bucket, remote)
    deleted = doDeletions(bucket, remote.difference(local))

    print "Added:", added
    print "Deleted:", deleted