def checker(step):
    shared = reflection.getSharedObject()
    
    step(6,'sync the final state of the repository into a fresh local folder')
    #sleep(10)

    d = make_workdir()
    run_ocsync(d)

    fn = '%s/test.BIG'%d
    
    shared['w2d1'] = md5sum(fn)
    logger.info(shared['w2d1'])

    # print the status
    logger.info('final output %s',d)
    logger.info('content as reported by webdav')
    runcmd('curl -s -k -XPROPFIND %s | xmllint --format -'%oc_webdav_url())

    list_versions_on_server('test.BIG')

    for x in sorted(shared.keys()):
        logger.info('shared %s %s',x,shared[x])

    # verify the status

    error_check(shared['w2d1'] in [shared['w0v1'],shared['w0v2'],shared['w1v1']], "file downloaded by the checker does not correspond to any file created locally by the workers")

    # make sure that all versions stored on the server correspond to a version generated locally
    versions = get_md5_versions_on_server('test.BIG')
    
    for v5,name in versions:
        error_check(v5 in [shared['w0v1'],shared['w0v2'], shared['w1v1']],
                    'a version %s (filename %s) does not correspond to any previously generated file'%(v5,name))
Example #2
def reporter(step):
    shared = reflection.getSharedObject()

    # report on shared objects at every step
    for i in range(5): # until the last step used in this example
        step(i)
        logger.info("shared: %s",str(shared))
Example #3
def worker1(step):

    shared = reflection.getSharedObject()

    step(1,'Preparation')
    d = make_workdir()
    run_ocsync(d)

    step(2,'Resync and check files added by worker0')

    fn = shared['filename']

    for k in [3,5,7]:
        step(k,'Resync and check files added by worker0')

        time.sleep(sleep)

        run_ocsync(d)

        dest = os.stat(os.path.join(d,fn))
        source = shared['source_stat']

        log_times(source=source,dest=dest)

        error_check(dest.st_mtime == round(dest.st_mtime), "Propagated mtime gets rounded up to the nearest second" ) 
        error_check(abs(source.st_mtime-dest.st_mtime) <= 1.0, 'Expecting not to have more than 1s difference on mtime') # NOT TRUE!
Example #4
def reporter(step):
    shared = reflection.getSharedObject()

    # report on shared objects at every step
    for i in range(5):  # until the last step used in this example
        step(i)
        logger.info("shared: %s", str(shared))
Example #5
def worker1(step):

    shared = reflection.getSharedObject()

    step(1, 'Preparation')
    d = make_workdir()
    run_ocsync(d)

    step(2, 'Resync and check files added by worker0')

    fn = shared['filename']

    for k in [3, 5, 7]:
        step(k, 'Resync and check files added by worker0')

        time.sleep(sleep)

        run_ocsync(d)

        dest = os.stat(os.path.join(d, fn))
        source = shared['source_stat']

        log_times(source=source, dest=dest)

        error_check(dest.st_mtime == round(dest.st_mtime),
                    "Propagated mtime gets rounded up to the nearest second")
        error_check(
            abs(source.st_mtime - dest.st_mtime) <= 1.0,
            'Expecting not to have more than 1s difference on mtime'
        )  # NOT TRUE!
Example #6
def push_to_monitoring(returncode, total_duration):
    monitoring_points = []
    shared = reflection.getSharedObject()
    if 'monitoring_points' in shared.keys():
        monitoring_points = shared['monitoring_points']

    monitoring_type = config.get('monitoring_type', None)
    if monitoring_type == 'prometheus':
        handle_prometheus_push(returncode, total_duration, monitoring_points)
    elif monitoring_type == 'local':
        handle_local_push(returncode, total_duration, monitoring_points)
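
The handle_prometheus_push and handle_local_push helpers are not shown in these examples. As a rough sketch only (the body below is an assumption, not the real smashbox implementation), a 'local' handler could simply log the summary and the accumulated points:

import logging

logger = logging.getLogger(__name__)

def handle_local_push(returncode, total_duration, monitoring_points):
    # Hypothetical stand-in for the helper referenced above: it only logs,
    # whereas the real handler presumably persists the data somewhere.
    logger.info("test finished: returncode=%s total_duration=%.2fs",
                returncode, total_duration)
    for point in monitoring_points:
        logger.info("metric=%s value=%s timestamp=%s",
                    point['metric'], point['value'], point['timestamp'])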
Example #7
def helloB(step):
    logger.debug("dir() %s",dir())
    
    shared = reflection.getSharedObject()

    step(2,'modifying and reassigning n, xyz')
    shared['n'] += 111
    shared['xyz'] += [4]

    step(3, 'checking integrity')
    error_check(shared['n']==222, 'problem handling shared n=%d'%shared['n'])
    error_check(list(shared['xyz'])==[1,2,3,4], 'problem handling shared xyz=%s'%repr(shared['xyz']))
Example #8
def commit_to_monitoring(metric, value, timestamp=None):
    shared = reflection.getSharedObject()
    if 'monitoring_points' not in shared.keys():
        shared['monitoring_points'] = []

    # Create monitoring metric point
    monitoring_point = dict()
    monitoring_point['metric'] = metric
    monitoring_point['value'] = value
    monitoring_point['timestamp'] = timestamp

    # Append metric to shared object
    monitoring_points = shared['monitoring_points']
    monitoring_points.append(monitoring_point)
    shared['monitoring_points'] = monitoring_points
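
A possible calling sequence, sketched under the assumption that this runs inside a smashbox test where commit_to_monitoring and push_to_monitoring are in scope; the metric names 'smashbox.sync_duration' and 'smashbox.nfiles' are invented for illustration:

import time

# Each call appends one {'metric', 'value', 'timestamp'} dict to
# shared['monitoring_points'] and reassigns the list, so the update
# becomes visible to the other workers.
commit_to_monitoring('smashbox.sync_duration', 12.3, timestamp=time.time())
commit_to_monitoring('smashbox.nfiles', 42)  # timestamp stays None

# At the end of the run the accumulated points are dispatched according
# to the configured monitoring_type (see push_to_monitoring above).
push_to_monitoring(returncode=0, total_duration=12.3)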
Example #9
def helloB(step):
    logger.debug("dir() %s", dir())

    shared = reflection.getSharedObject()

    step(2, 'modifying and reassigning n, xyz')
    shared['n'] += 111
    shared['xyz'] += [4]

    step(3, 'checking integrity')
    error_check(shared['n'] == 222,
                'problem handling shared n=%d' % shared['n'])
    error_check(
        list(shared['xyz']) == [1, 2, 3, 4],
        'problem handling shared xyz=%s' % repr(shared['xyz']))
Example #10
def helloA(step):
    logger.debug("globals() %s", globals().keys())

    # Sharing of variables between workers - see below.
    shared = reflection.getSharedObject()

    step(0, 'defining n')

    shared['n'] = 111

    # Variable 'n' is now shared and visible to all the workers.
    # This happens when a value of the variable is assigned.
    #
    # Limitations: Workers A and B should not modify the same shared
    # variable in parallel (that is, in the same step). Also, the
    # worker that sets the variable should do it in a step preceding
    # the steps in which other workers make use of it. Only this
    # guarantees that the value is set before someone else tries
    # to make use of it.
    #
    # If you need more than one worker to modify the same
    # shared variable make sure this happens in separate steps.

    step(1, 'defining xyz')

    # Unlike the plain types (string, int, float), here we share a list - see the limitations below.
    shared['xyz'] = [1, 2, 3]

    # If you modify the value of a shared attribute in place
    # (e.g. list.append or list.sort) then the change is NOT visible to
    # other processes until you actually make the assignment.
    #
    # Some ideas how to handle lists by assigning a new value:
    #   * use shared['list']+=[a] instead of shared['list'].append(a)
    #   * use shared['list']=sorted(shared['list']) instead of shared['list'].sort()
    #
    step(2, 'waiting...')

    step(3, 'checking integrity')

    # this is a non-fatal assert - the error will be reported and the test marked as failed but execution will continue
    error_check(shared['n'] == 222,
                'problem handling shared n=%d' % shared['n'])

    # this is a fatal assert - execution will stop immediately
    fatal_check(
        list(shared['xyz']) == [1, 2, 3, 4],
        'problem handling shared xyz=%s' % repr(shared['xyz']))
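
The in-place modification caveat described above matches the behaviour of a multiprocessing.Manager dictionary. Assuming the shared object behaves the same way (an assumption, not something these examples state), here is a minimal standalone sketch of the pitfall and the reassignment workaround:

from multiprocessing import Manager

if __name__ == '__main__':
    # Stand-in for reflection.getSharedObject(): a Manager dict proxy.
    manager = Manager()
    shared = manager.dict()
    shared['xyz'] = [1, 2, 3]

    # append() mutates a local copy of the stored list; the change is
    # never written back to the manager process.
    shared['xyz'].append(4)
    print(shared['xyz'])  # still [1, 2, 3]

    # += reads the list, extends the local copy and reassigns it, so the
    # new value is propagated (same idea as shared['xyz'] = sorted(...)).
    shared['xyz'] += [4]
    print(shared['xyz'])  # [1, 2, 3, 4]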
Example #11
def helloA(step):
    logger.debug("globals() %s",globals().keys())


    # Sharing of variables between workers - see below.
    shared = reflection.getSharedObject()

    step(0,'defining n')
    
    shared['n'] = 111
    
    # Variable 'n' is now shared and visible to all the workers.
    # This happens when a value of the variable is assigned.
    #
    # Limitations: Workers A and B should not modify the same shared
    # variable in parallel (that is, in the same step). Also, the
    # worker that sets the variable should do it in a step preceding
    # the steps in which other workers make use of it. Only this
    # guarantees that the value is set before someone else tries
    # to make use of it.
    # 
    # If you need more than one worker to modify the same
    # shared variable make sure this happens in separate steps.


    step(1,'defining xyz')

    # Unlike the plain types (string, int, float), here we share a list - see the limitations below.
    shared['xyz'] = [1,2,3]

    # If you modify the value of a shared attribute in place
    # (e.g. list.append or list.sort) then the change is NOT visible to
    # other processes until you actually make the assignment.
    # 
    # Some ideas how to handle lists by assigning a new value:
    #   * use shared['list']+=[a] instead of shared['list'].append(a)
    #   * use shared['list']=sorted(shared['list']) instead of shared['list'].sort() 
    #
    step(2,'waiting...')

    step(3,'checking integrity')

    # this is a non-fatal assert - the error will be reported and the test marked as failed but execution will continue
    error_check(shared['n']==222, 'problem handling shared n=%d'%shared['n'])

    # this is a fatal assert - execution will stop immediately
    fatal_check(list(shared['xyz'])==[1,2,3,4], 'problem handling shared xyz=%s'%repr(shared['xyz']))
Example #12
def worker0(step):

    # cleanup server files from the previous run
    reset_owncloud_account()

    # cleanup all local files for the test
    reset_rundir()

    shared = reflection.getSharedObject()

    step(1, 'Preparation')
    d = make_workdir()
    run_ocsync(d)
    k0 = count_files(d)

    # TODO: add OS-type and fstype information, e.g. sources: /etc/mtab; df -T -P

    fn = "TIMESTAMP-TEST.DAT"
    absfn = os.path.join(d, fn)
    shared['filename'] = fn

    step(2, 'Add new file')

    create_hashfile(d, fn, size=1000)

    stat_before = os.stat(absfn)
    run_ocsync(d)
    stat_after = os.stat(absfn)
    shared['source_stat'] = stat_after

    log_times(stat_before=stat_before, stat_after=stat_after)
    assert (
        stat_before.st_mtime == stat_after.st_mtime
    )  # paranoia check, sync client should not modify local source file

    step(4, 'Add a new version (new local inode)')

    create_hashfile(d, fn, size=1000)
    run_ocsync(d)
    shared['source_stat'] = os.stat(absfn)

    step(6, 'Add a new version (same local inode)')

    modify_file(absfn, 'x', 1, 100)  # append to existing file
    run_ocsync(d)
    shared['source_stat'] = os.stat(absfn)
Example #13
def worker0(step):    

    # cleanup server files from the previous run
    reset_owncloud_account()

    # cleanup all local files for the test
    reset_rundir()

    shared = reflection.getSharedObject()

    step(1,'Preparation')
    d = make_workdir()
    run_ocsync(d)
    k0 = count_files(d)

    # TODO: add OS-type and fstype information, e.g. sources: /etc/mtab; df -T -P

    fn = "TIMESTAMP-TEST.DAT"
    absfn = os.path.join(d,fn)
    shared['filename'] = fn

    step(2,'Add new file')

    create_hashfile(d,fn,size=1000)

    stat_before = os.stat(absfn)
    run_ocsync(d)
    stat_after = os.stat(absfn)
    shared['source_stat'] = stat_after

    log_times(stat_before=stat_before,stat_after=stat_after)
    assert(stat_before.st_mtime == stat_after.st_mtime) # paranoia check, sync client should not modify local source file

    step(4,'Add a new version (new local inode)')

    create_hashfile(d,fn,size=1000)
    run_ocsync(d)
    shared['source_stat'] = os.stat(absfn)


    step(6,'Add a new version (same local inode)')

    modify_file(absfn,'x',1,100) # append to existing file
    run_ocsync(d)
    shared['source_stat'] = os.stat(absfn)
Example #14
def any_worker(step):
    shared = reflection.getSharedObject()

    shared['k'] = 0

    step(1, None)

    shared['k'] += 1

    step(2, None)
    shared['k'] += 1

    step(3, None)
    shared['k'] += 1

    step(4, 'finish')

    logger.info("k=%d, expected %d", shared['k'], N * 3)
Example #15
def any_worker(step):
    shared=reflection.getSharedObject()

    shared['k'] = 0
    
    step(1,None)

    shared['k'] += 1

    step(2,None)
    shared['k'] += 1
    
    step(3,None)
    shared['k'] += 1

    step(4,'finish')

    logger.info("k=%d, expected %d",shared['k'],N*3)
Example #16
def worker0(step):
    shared = reflection.getSharedObject()
    
    reset_owncloud_account()
    reset_rundir()

    #versions = get_md5_versions_on_server('test.BIG')    
    
    step(1,'create initial content and sync')

    d = make_workdir()
    fn = '%s/test.BIG'%d
    createfile(fn,'0',count=100000,bs=1000)
    shared['w0v1'] = md5sum(fn)
    logger.info(shared['w0v1'])
    hexdump(fn)

    run_ocsync(d)

    step(3,'modify local content')

    createfile(fn,'1',count=200,bs=1000000) # create large file -> it will take longer to sync
    shared['w0v2'] = md5sum(fn)
    logger.info(shared['w0v2'])
    hexdump(fn)

    step(4,'sync local content')

    run_ocsync(d)

    shared['w0d1'] = md5sum(fn)
    logger.info(shared['w0d1'])
    hexdump(fn)

    if shared['w0d1'] == shared['w0v2']:
        logger.info("Content NOT changed locally")
    else:
        logger.info("CONTENT CHANGED LOCALLY")
    
    #step(4)
    #run_ocsync(d)
    #step(5)
    logger.info('output %s',d)
Example #17
def checker(step):
    shared = reflection.getSharedObject()
    
    step(6,'sync the final state of the repository into a fresh local folder')
    #sleep(10)

    d = make_workdir()
    run_ocsync(d)

    fn = '%s/test.BIG'%d
    
    shared['w2d1'] = md5sum(fn)
    logger.info(shared['w2d1'])

    # print the status
    logger.info('final output %s',d)
    logger.info('content as reported by webdav')
    #runcmd('curl -s -k -XPROPFIND %s | xmllint --format -'%oc_webdav_url()) #FIXME: no request body, unsupported by EOS

    #DISABLED FOR NOW
    #list_versions_on_server('test.BIG')

    for x in sorted(shared.keys()):
        logger.info('shared %s %s',x,shared[x])

    # verify the status

    error_check(shared['w2d1'] in [shared['w0v1'],shared['w0v2'],shared['w1v1']], "file downloaded by the checker does not correspond to any file created locally by the workers")

    if False:
       # DISABLED FOR NOW 
       # make sure that all versions stored on the server correspond to a version generated locally
       versions = get_md5_versions_on_server('test.BIG')
       
       for v5,name in versions:
           error_check(v5 in [shared['w0v1'],shared['w0v2'], shared['w1v1']],
                       'a version %s (filename %s) does not correspond to any previously generated file'%(v5,name))
Example #18
def worker1(step):
    shared = reflection.getSharedObject()
    
    step(2,'sync initial state created by worker 0')

    d = make_workdir()
    run_ocsync(d)

    fn = '%s/test.BIG'%d

    shared['w1d1'] = md5sum(fn)
    logger.info(shared['w1d1'])
    error_check(shared['w1d1'] == shared['w0v1'],'downloaded file does not match the initially created file')

    step(3,'modify local content')

    createfile(fn,'2',count=200000,bs=1000) # create large file -> it will take longer to sync

    shared['w1v1'] = md5sum(fn)
    logger.info(shared['w1v1'])
    hexdump(fn)

    step(4,'sync modified file')

    # add a bit of delay to make sure worker1 starts later than worker0
    sleep(2)

    run_ocsync(d)

    shared['w1d2'] = md5sum(fn)
    logger.info(shared['w1d2'])
    hexdump(fn)

    step(5)

    logger.info('output %s',d)
Example #19
def worker1(step):
    shared = reflection.getSharedObject()
    
    step(2,'sync initial state created by worker 0')

    d = make_workdir()
    run_ocsync(d)

    fn = '%s/test.BIG'%d

    shared['w1d1'] = md5sum(fn)
    logger.info(shared['w1d1'])
    error_check(shared['w1d1'] == shared['w0v1'],'downloaded file does not match the initially created file')

    step(3,'modify local content')

    createfile(fn,'2',count=200000,bs=1000) # create large file -> it will take longer to sync

    shared['w1v1'] = md5sum(fn)
    logger.info(shared['w1v1'])
    hexdump(fn)

    step(4,'sync modified file')

    # add a bit of delay to make sure worker1 starts later than worker0
    sleep(2)

    run_ocsync(d)

    shared['w1d2'] = md5sum(fn)
    logger.info(shared['w1d2'])
    hexdump(fn)

    step(5)

    logger.info('output %s',d)