def postproduction(self, records):
        IMAGES_DIR = self.IMAGES_DIR
        for r in records:
            for k, v in r.items():
                if 'date' in k or 'time' in k:
                    if r[k]:
                        r[k] = to_date(v)

            img_fn = r['image']
            img_src = os.path.join(BASE, IMAGES_DIR, img_fn)
            assert os.path.exists(img_src), img_src

            img_dst_folder = os.path.join(MEDIA_DIR, IMAGES_DIR)
            img_dst = os.path.join(img_dst_folder, img_fn)
            if not os.path.exists(img_dst_folder):
                os.makedirs(os.path.join(img_dst_folder))
            if not os.path.exists(img_dst):
                cmd = 'cp "%s" "%s"' % (img_src, img_dst)
                print cmd
                sh(cmd)
            r['image'] = os.path.join(IMAGES_DIR, img_fn)
            print r['image']

            from import_journalentry import OUT_FN as fn_journalentries_json
            journalentries = json.load(open(fn_journalentries_json))
            for je in journalentries:
                fr = je['fields']['folio_number_from']
                fr = int(fr)
                to = je['fields']['folio_number_to']
                to = int(to)
                if fr <= int(r['folio_number']) <= to:
                    r['journalentry'] = je['pk']
        return records
def read_hk_items():
    """Read the Harta Karun item CSV and return a list of record dicts.

    Copies each non-dummy item image into MEDIA_DIR as a side effect.
    """
    fn = os.path.join(BASE, HARTAKARUN, FN_HKITEM)
    #    images_dir = os.path.join(BASE, HARTAKARUN, 'HK Category Images')
    lines = csv.reader(codecs.open(fn, encoding='iso-8859-1'), delimiter=';')
    lines = list(lines)
    records = []
    # First row holds the CSV column names; map them to model field names.
    headers = lines[0]
    print headers
    headers = [MAP_FIELDS_TO_MODEL_HKITEM[k] for k in headers]
    for l in lines[1:]:
        l = [unicode(x) for x in l]
        r = dict(zip(headers, l))
        if r['image'] == 'dummy.png':
            # Placeholder image: treat as "no image".
            r['image'] = None
        else:
            # Images live in a subdirectory named after the first three
            # characters of the filename.
            sub_dir = r['image'][:3]
            sh('cp "%s" "%s"' %
               (os.path.join(IMAGES_DIR, sub_dir, r['image']), MEDIA_DIR))

        records.append(r)

    for x in records:
        print x
    print headers
    return records
 def postproduction(self, records): 
     IMAGES_DIRS = self.IMAGES_DIRS
     print IMAGES_DIRS
     fn = os.path.join(BASE,  'retrobooks/RB items', 'retrobookDuplicateScanReferenceInstance_20120524.csv')
     lines = csv.reader(codecs.open(fn, encoding=ENCODING), delimiter=DELIMITER)
     scanid2retrobookid = dict([(l[2], l[1]) for l in lines])
     for i in range(0, len(records)):
         r = records[i]
         r['retrobook'] = scanid2retrobookid[r['pk']]
         if int(r['retrobook']) >= 5:
             r['retrobook'] = None                
         
         if r['retrobook']:
             img_fn = r['image']
             img_src = os.path.join(BASE,'retrobooks/RB items', IMAGES_DIRS[r['retrobook']], 'CD FolioImages', img_fn)
             img_dst_folder = os.path.join(MEDIA_DIR, IMAGES_DIRS[r['retrobook']], 'CD FolioImages') 
             if not os.path.exists(img_src):
                 print 'could not find', img_src
             img_dst = os.path.join(img_dst_folder, img_fn)
             print img_dst
             if not os.path.exists(img_dst) :
                 cmd = 'cp "%s" "%s"' % (img_src, img_dst)
                 print cmd
                 sh(cmd)
             r['image'] = os.path.join(IMAGES_DIRS[r['retrobook']], 'CD FolioImages', img_fn)
         
         
     return records
Esempio n. 4
0
def processNextEvent():
    """Dispatch handlers for pending GitHub-event ConfigMaps.

    Runs nonblocking handlers for any ready event first; failing that, runs
    blocking handlers for the earliest ready event.  Returns True when a
    handler performed work, False when nothing is pending.
    """
    def load_event(name):
        # Fetch one event ConfigMap and parse it as JSON.
        return json.loads(
            sh(f'kubectl -n {env.CD_NAMESPACE} get configmap -o=json ' +
               getFullName(name)))

    # Highest event id the cursor says we are allowed to process.
    maxEventId = int(
        json.loads(
            sh(f'kubectl -n {env.CD_NAMESPACE} get configmap -ojson ' +
               getFullName('event-cursor')))['data']['eventID'])

    pending = sh(
        f"kubectl -n {env.CD_NAMESPACE} get configmaps -o=jsonpath='{{.items[*].metadata.name}}'"
        +
        f" -lowner=quickcd,kind=GitHubEvent,status=pending,org={env.CD_GITHUB_ORG_NAME},repo={env.CD_GITHUB_REPO_NAME}"
    ).strip()
    if not pending:
        return False

    # Event ids are the numeric suffix of each ConfigMap name; keep only
    # those at or below the cursor.
    ready = []
    for name in pending.split(' '):
        eid = int(name.split('-').pop())
        if eid <= maxEventId:
            ready.append(eid)

    # Nonblocking handlers first, if any event has one ready to run.
    for eid in ready:
        resource = load_event(eid)
        event = json.loads(resource['data']['event'])
        if runHandlers(event, False, eid, resource['metadata']['labels']):
            return True

    # Otherwise run blocking handlers for the oldest ready event.
    oldest = min(ready)
    resource = load_event(oldest)
    return runHandlers(json.loads(resource['data']['event']), True,
                       oldest, resource['metadata']['labels'])
Esempio n. 5
0
def generateKubeconfig():
    """Log in to Bluemix and install the cluster kubeconfig under ~/.kube."""
    sh(f'bx login --apikey {env.CD_BX_TOKEN} -a https://api.ng.bluemix.net')
    sh(f'bx cs region-set {env.CD_REGION_DASHED}')
    # cluster-config prints "export KUBECONFIG=<path>"; keep the path part.
    exported = sh(f'bx cs cluster-config --export {env.CD_CLUSTER_NAME}')
    confPath = exported.split('=').pop()
    # The exported dir also holds certs, so link the whole dir into ~/.kube,
    # then rename the config file to the default name kubectl expects.
    sh(f'ln -s {os.path.dirname(confPath)} $HOME/.kube')
    sh(f'mv $HOME/.kube/{os.path.basename(confPath)} $HOME/.kube/config')
Esempio n. 6
0
def run(num_servers = None, # default 5
        client_command = 'build/Examples/SmokeTest',
        timeout = 10):
    """Start a LogCabin cluster on the smoke hosts, grow it to all hosts,
    then run ``client_command`` on localhost until it exits.

    Raises Exception if the client has not finished within ``timeout``
    seconds; sandbox failures propagate via checkFailures().
    """
    if num_servers is None:
        # Default to at most five servers, bounded by the available hosts.
        num_servers = min(5, len(smokehosts))

    with Sandbox() as sandbox:
        # Fresh log and debug output.
        sh('rm -rf log')
        sh('rm -f debug/*')
        sh('scripts/initlog.py --serverid 1 --address %s' % smokehosts[0][0])

        for sid in range(1, num_servers + 1):
            launch_host = smokehosts[sid - 1][0]
            launch_cmd = 'build/LogCabin --id %d --config smoketest.conf' % sid
            print('Starting %s on %s' % (launch_cmd, launch_host))
            sandbox.rsh(launch_host, launch_cmd, bg=True,
                        stderr=open('debug/%d' % sid, 'w'))
            sandbox.checkFailures()

        print('Growing cluster')
        sh('build/Examples/Reconfigure %s' %
           ' '.join([entry[0] for entry in smokehosts]))

        print('Starting %s on localhost' % client_command)
        client = sandbox.rsh('localhost', client_command, bg=True,
                             stderr=open('debug/client', 'w'))

        # Poll the client until it finishes, watching for server failures.
        started_at = time.time()
        while client.proc.returncode is None:
            sandbox.checkFailures()
            time.sleep(.1)
            if time.time() - started_at > timeout:
                raise Exception('timeout exceeded')
    def read_items(self, limit=None):
        print '/home/jelle/Dropbox/DASA Beta/Testdata/Bookshelves/ANRI_HR859/ANRI_HR859 FolioImages'
        images = []
        for dirname in self.SOURCE_DIRS:
            images += [os.path.join(dirname, fn) for fn in os.listdir(dirname)]
        records = []
        images.sort()
        backcovers = []
        frontcovers = []
        for img in images:
            if img.endswith('backcover.png'):
                backcovers.append(img)
            elif img.endswith('cover.png'):
                frontcovers.append(img)
        images = frontcovers + [
            x for x in images if x not in backcovers and x not in frontcovers
        ] + backcovers

        for i, img in enumerate(images):
            #            if 'cover.png' in img:
            #                continue

            relative_path_to_local_copy_of_image = os.path.join(
                img.split('/')[-3],
                img.split('/')[-1])
            dir = os.path.split(
                os.path.join(MEDIA_DIR,
                             relative_path_to_local_copy_of_image))[0]
            if not os.path.exists(dir):
                os.makedirs(dir)
            sh('cp "%s" "%s"' % (img, dir))
            pagenumber = img.split('.')[0].split('f')[-1]
            try:
                pagenumber = int(pagenumber)
                pagenumber = str(pagenumber)
            except:
                pagenumber = ''
            record = {
                'pk': str(i + 1),
                'institution': 'ANRI',
                'fonds': 'HR',
                'file_id': img.split('/')[-3].split('_')[1][2:],
                'image': relative_path_to_local_copy_of_image,
                'position': str(i + 1),
                'pagenumber': pagenumber,
            }

            record['retrobook'] = file_ids2pk[record['file_id']]
            records.append(record)
        return records
Esempio n. 8
0
def read_category_instance():
    """Read the Harta Karun category CSV and split it into main categories
    (no parent) and sub-categories (with a parent).

    Copies category images into MEDIA_DIR as a side effect.  Returns the
    pair (records_maincategory, records_category).
    """
    fn = os.path.join(BASE, HARTAKARUN, FN_CATEGORYINSTANCE)
    images_dir = os.path.join(BASE, HARTAKARUN, 'HK Category Images')
    print 'reading', fn
    #    lines = csv.reader(codecs.open(fn, encoding='utf8'), delimiter=';')
    lines = unicode_csv_reader(fn)
    lines = [x for x in lines]
    # First row holds the CSV column names; map them to model field names.
    headers = lines[0]
    headers = [MAP_FIELDS_TO_MODEL[k] for k in headers]
    records_category = []
    records_maincategory = []
    assert os.path.exists(images_dir), images_dir
    for l in lines[1:]:
        #        l = [to_unicode(x) for x in l]
        r = dict(zip(headers, l))
        # Skip the literal 'None' placeholder row.
        if r['name_en'] in ['None']:
            continue
        # Parent id '0' means "no parent", i.e. a main category.
        if r['hartakarun_main_category'] == '0':
            r['hartakarun_main_category'] = None

        # Fall back to a default image when missing or a dummy placeholder.
        if not r['image'] or r['image'] == 'dummy.png':
            r['image'] = 'HK Category I.6.png'

        sh('cp "%s" "%s"' % (os.path.join(images_dir, r['image']), MEDIA_DIR))

        if r['image_intro'] == 'dummy.png':
            r['image_intro'] = None

        # Intro image defaults to the main image; otherwise copy it too.
        if not r['image_intro']:
            r['image_intro'] = r['image']
        else:
            cmd = 'cp "%s" "%s"' % (os.path.join(images_dir,
                                                 r['image_intro']), MEDIA_DIR)
            sh(cmd)
            print cmd

        if r['hartakarun_main_category']:
            records_category.append(r)
        else:
            del r['hartakarun_main_category']
            records_maincategory.append(r)


#        assert type(r['position']) == type(0), r['position']

#    for x in records:
#        print x
    print headers
    return records_maincategory, records_category
def read_hk_scans():
    """Read the Harta Karun scan CSV, copy folio images for items 1-3 into
    MEDIA_DIR, and return the record dicts.
    """
    fn = os.path.join(BASE, HARTAKARUN, FN_HKSCAN)
    #    images_dir = os.path.join(BASE, HARTAKARUN, 'HK Category Images')
    lines = csv.reader(codecs.open(fn, encoding='iso-8859-1'), delimiter=';')
    lines = list(lines)
    records = []
    # First row holds the CSV column names; map them to model field names.
    headers = lines[0]
    print headers
    headers = [MAP_FIELDS_TO_MODEL_HKSCAN[k] for k in headers]
    for l in lines[1:]:
        l = [unicode(x) for x in l]
        r = dict(zip(headers, l))
        records.append(r)

        if int(r['hartakarun_item']) <= 3:
            sub_dir = 'HK%s/HK%s Folio Images' % (r['hartakarun_item'],
                                                  r['hartakarun_item'])
            # Try .png then .jpg; keep the first candidate that exists.
            # NOTE(review): if neither file exists, src keeps the last
            # candidate and the cp below fails — confirm this is acceptable.
            image_fns = [
                'ID_ANRI_HR_%s_%s.png' % (r['file_id'], r['position']),
                'ID_ANRI_HR_%s_%s.jpg' % (r['file_id'], r['position']),
            ]
            for image_fn in image_fns:
                src = os.path.join(IMAGES_DIR, sub_dir, image_fn)
                if os.path.exists(src):
                    break


#	        src = os.path.join(IMAGES_DIR, sub_dir, image_fn)
            dst = os.path.join(MEDIA_DIR, sub_dir, image_fn)

            try:
                os.makedirs(os.path.join(MEDIA_DIR, sub_dir))
            except OSError:
                #probably because it already existed
                pass

            cmd = 'cp "%s" "%s"' % (src, dst)
            print cmd
            sh(cmd)
            # NOTE(review): stores the absolute MEDIA_DIR path here, unlike
            # the readers that store a relative path — confirm intended.
            r['image'] = dst

        # Items above 3 exist in the CSV but are not linked.
        if int(r['hartakarun_item']) > 3:
            r['hartakarun_item'] = None
    for x in records:
        print x
    print headers
    return records
Esempio n. 10
0
def runHandlers(event, blocking, eventID=0, labels=None):
    """Fire all (remaining) handlers registered for ``event['type']``.

    A handler runs only if its blocking flag matches ``blocking``, its
    filter accepts the payload, and it has not already completed (its id is
    not in ``labels``).  Retries back off cubically via the labels'
    last-run/attempt counters.  Returns True if any handler ran.
    """
    # Fix: the original used the mutable default ``labels={}``, a shared
    # object across calls; ``None`` + local creation is the safe idiom.
    if labels is None:
        labels = {}
    workPerformed = False
    for handler in dispatchTable[event['type']]:
        if handler.isBlocking == blocking and handler.filterFn(
                event['payload']) and handler.id not in labels:
            if eventID:
                # Cubic backoff: skip if fewer than attempts**3 minutes have
                # passed since the last try.
                lastRun = float(labels.get(f'{handler.id}_last_run', '0'))
                attempts = int(labels.get(f'{handler.id}_attempts', '0'))
                if (time.time() - lastRun) / 60 < attempts**3:
                    continue
                writeLabels(
                    eventID, **{
                        f'{handler.id}_last_run': time.time(),
                        f'{handler.id}_attempts': attempts + 1
                    })
            workPerformed = True
            # reset workspace and call handler
            sh('rm -rf /tmp')
            sh('mkdir -m 777 /tmp')
            os.chdir('/tmp')
            setCurrentHandlerFnName(handler.name)
            print(f"Event {eventID}. Calling handler: {handler.name}")
            try:
                handler.handlerFn(event['payload'])
            except Exception:
                # Was a bare except; narrowed so KeyboardInterrupt and
                # SystemExit propagate instead of being swallowed.
                print(traceback.format_exc())
                if blocking:
                    return True  # don't run other handlers for this event since encountered an error
                else:
                    continue  # run remaining nonblocking handlers for this evt
            if eventID:
                writeLabels(eventID, **{f'{handler.id}': 'complete'})

    # check if any handlers remaining and mark complete if not
    if eventID:
        allDone = True
        for handler in dispatchTable[event['type']]:
            if handler.filterFn(event['payload']) and handler.id not in labels:
                allDone = False
                break
        if allDone:
            writeLabels(eventID, status='handled')

    return workPerformed
Esempio n. 11
0
def read_items():
    """Read the item CSV, convert date fields, copy each item image into
    MEDIA_DIR, link resolutions by file_id, and return the record dicts.
    """
    fn = os.path.join(BASE, FN)
    #    images_dir = os.path.join(BASE, HARTAKARUN, 'HK Category Images')
    lines = csv.reader(codecs.open(fn, encoding=ENCODING), delimiter=DELIMITER)
    lines = list(lines)
    records = []
    # First row holds the CSV column names; map them to model field names.
    headers = lines[0]
    print headers
    headers = [MAP_FIELDS_TO_MODEL[k] for k in headers]
    resolutions = read_resolutions()

    for l in lines[1:]:
        l = [unicode(x) for x in l]
        r = dict(zip(headers, l))
        # Convert anything that looks like a date/time column.
        for k, v in r.items():
            if 'date' in k or 'time' in k:
                r[k] = to_date(v)

        img_fn = r['image']
        img_src = os.path.join(BASE, IMAGES_DIR, img_fn)
        assert os.path.exists(img_src), img_src

        # Copy the image into the media tree unless it is already there.
        img_dst_folder = os.path.join(MEDIA_DIR, IMAGES_DIR)
        img_dst = os.path.join(img_dst_folder, img_fn)
        if not os.path.exists(img_dst_folder):
            os.makedirs(os.path.join(img_dst_folder))
        if not os.path.exists(img_dst):
            cmd = 'cp "%s" "%s"' % (img_src, img_dst)
            print cmd
            sh(cmd)
        # Store the path relative to MEDIA_DIR, as the model expects.
        r['image'] = os.path.join(IMAGES_DIR, img_fn)

        # Link the matching resolution record, if any.
        try:
            resolution = resolutions[r['file_id']]
            r['resolution'] = resolution['pk']
        except KeyError:
            r['resolution'] = None

        records.append(r)

    for x in records:
        print x
    print headers
    return records
Esempio n. 12
0
def main():
    """Smoke test: initialize storage, start a LogCabin cluster on the smoke
    hosts, grow it, then run the client command on localhost until it exits
    or the timeout expires (which raises).
    """
    arguments = docopt(__doc__)
    client_command = arguments['--client']
    num_servers = int(arguments['--servers'])
    reconf_opts = arguments['--reconf']
    # A literal pair of quotes means "no extra options".
    if reconf_opts == "''":
        reconf_opts = ""
    timeout = int(arguments['--timeout'])

    server_ids = range(1, num_servers + 1)
    with Sandbox() as sandbox:
        # Fresh storage and debug output.
        sh('rm -rf smoketeststorage/')
        sh('rm -f debug/*')
        sh('mkdir -p debug')
        sh('scripts/initlog.py '
             '--serverid 1 '
             '--address %s '
             '--storage smoketeststorage' % smokehosts[0][0])

        # Launch each server in the background, stderr to debug/<id>.
        for server_id in server_ids:
            host = smokehosts[server_id - 1]
            command = 'build/LogCabin --id %d --config smoketest.conf' % server_id
            print('Starting %s on %s' % (command, smokehosts[server_id - 1][0]))
            sandbox.rsh(smokehosts[server_id - 1][0], command, bg=True,
                        stderr=open('debug/%d' % server_id, 'w'))
            sandbox.checkFailures()

        print('Growing cluster')
        sh('build/Examples/Reconfigure %s %s' %
           (reconf_opts, ' '.join([h[0] for h in smokehosts[:num_servers]])))

        print('Starting %s on localhost' % client_command)
        client = sandbox.rsh('localhost', client_command, bg=True,
                             stderr=open('debug/client', 'w'))

        # Poll the client until it finishes, checking for server failures.
        start = time.time()
        while client.proc.returncode is None:
            sandbox.checkFailures()
            time.sleep(.1)
            if time.time() - start > timeout:
                raise Exception('timeout exceeded')
Esempio n. 13
0
def main():
    """Smoke test: bootstrap the first server's log with the configured
    server binary, start the cluster, grow it, then run the client command
    on localhost until it exits or the timeout expires (which raises).
    """
    arguments = docopt(__doc__)
    client_command = arguments['--client']
    server_command = arguments['--binary']
    num_servers = int(arguments['--servers'])
    reconf_opts = arguments['--reconf']
    # A literal pair of quotes means "no extra options".
    if reconf_opts == "''":
        reconf_opts = ""
    timeout = int(arguments['--timeout'])

    server_ids = range(1, num_servers + 1)
    with Sandbox() as sandbox:
        # Fresh storage and debug output.
        sh('rm -rf smoketeststorage/')
        sh('rm -f debug/*')
        sh('mkdir -p debug')
        print('Initializing first server\'s log')
        sandbox.rsh(smokehosts[0][0],
                    '%s --bootstrap --id 1 --config smoketest.conf' %
                    server_command,
                    stderr=open('debug/bootstrap', 'w'))
        print()

        # Launch each server in the background, stderr to debug/<id>.
        for server_id in server_ids:
            host = smokehosts[server_id - 1]
            command = ('%s --id %d --config smoketest.conf' %
                       (server_command, server_id))
            print('Starting %s on %s' %
                  (command, smokehosts[server_id - 1][0]))
            sandbox.rsh(smokehosts[server_id - 1][0],
                        command,
                        bg=True,
                        stderr=open('debug/%d' % server_id, 'w'))
            sandbox.checkFailures()

        print('Growing cluster')
        sh('build/Examples/Reconfigure %s %s' %
           (reconf_opts, ' '.join([h[0] for h in smokehosts[:num_servers]])))

        print('Starting %s on localhost' % client_command)
        client = sandbox.rsh('localhost',
                             client_command,
                             bg=True,
                             stderr=open('debug/client', 'w'))

        # Poll the client until it finishes, checking for server failures.
        start = time.time()
        while client.proc.returncode is None:
            sandbox.checkFailures()
            time.sleep(.1)
            if time.time() - start > timeout:
                raise Exception('timeout exceeded')
Esempio n. 14
0
def setupGit():
    """Configure git identity and token-based HTTPS auth for private repos."""
    sh(f'git config --global user.email "{env.CD_EMAIL_ADDRESS}"')
    sh('git config --global user.name "quickcd"')

    # allow git to work without ssh for private repos
    if 'CD_GITHUB_TOKEN' in env:
        rewrite = ("git config --global url.'https://%s:x-oauth-basic@%s/'.insteadOf 'https://%s/'"
                   % (env.CD_GITHUB_TOKEN, env.CD_GITHUB_DOMAIN, env.CD_GITHUB_DOMAIN))
        sh(rewrite)
Esempio n. 15
0
def main():
    """Smoke test (auto-formatted variant): bootstrap the first server's
    log, start the cluster, grow it, then run the client command on
    localhost until it exits or the timeout expires (which raises).
    """
    arguments = docopt(__doc__)
    client_command = arguments["--client"]
    server_command = arguments["--binary"]
    num_servers = int(arguments["--servers"])
    reconf_opts = arguments["--reconf"]
    # A literal pair of quotes means "no extra options".
    if reconf_opts == "''":
        reconf_opts = ""
    timeout = int(arguments["--timeout"])

    server_ids = range(1, num_servers + 1)
    with Sandbox() as sandbox:
        # Fresh storage and debug output.
        sh("rm -rf smoketeststorage/")
        sh("rm -f debug/*")
        sh("mkdir -p debug")
        print("Initializing first server's log")
        sandbox.rsh(
            smokehosts[0][0],
            "%s --bootstrap --id 1 --config smoketest.conf" % server_command,
            stderr=open("debug/bootstrap", "w"),
        )
        print()

        # Launch each server in the background, stderr to debug/<id>.
        for server_id in server_ids:
            host = smokehosts[server_id - 1]
            command = "%s --id %d --config smoketest.conf" % (server_command, server_id)
            print("Starting %s on %s" % (command, smokehosts[server_id - 1][0]))
            sandbox.rsh(smokehosts[server_id - 1][0], command, bg=True, stderr=open("debug/%d" % server_id, "w"))
            sandbox.checkFailures()

        print("Growing cluster")
        sh("build/Examples/Reconfigure %s %s" % (reconf_opts, " ".join([h[0] for h in smokehosts[:num_servers]])))

        print("Starting %s on localhost" % client_command)
        client = sandbox.rsh("localhost", client_command, bg=True, stderr=open("debug/client", "w"))

        # Poll the client until it finishes, checking for server failures.
        start = time.time()
        while client.proc.returncode is None:
            sandbox.checkFailures()
            time.sleep(0.1)
            if time.time() - start > timeout:
                raise Exception("timeout exceeded")
Esempio n. 16
0
    def start(self, directory):
        """Start `perf` profiling of every OSD process on self.nodes.

        Output goes under <directory>/perf; runners for locally started
        perf processes are collected in self.perf_runners.
        """
        perf_dir = '%s/perf' % directory
        self.perf_dir = perf_dir
        common.pdsh(self.nodes, 'mkdir -p -m0755 -- %s' % perf_dir).communicate()

        # args_template presumably contains {perf_dir} and {pid} slots
        # filled by the .format() calls below — TODO confirm.
        perf_template = 'perf {} &'.format(self.args_template)
        local_node = common.get_localnode(self.nodes)
        if local_node:
            # Local case: read each osd pid file and start one perf per osd.
            for pid_path in glob.glob(os.path.join(self.pid_dir, 'osd.*.pid')):
                with open(pid_path) as pidfile:
                    pid = pidfile.read().strip()
                    perf_cmd = perf_template.format(perf_dir=perf_dir, pid=pid)
                    runner = common.sh(local_node, perf_cmd)
                    self.perf_runners.append(runner)
        else:
            # ${pid} will be handled by remote's sh
            perf_cmd = perf_template.format(perf_dir=perf_dir, pid='${pid}')
            # NOTE(review): unlike the mkdir above, this pdsh result is not
            # .communicate()d — confirm the command is actually awaited.
            common.pdsh(self.nodes, ['for pid in `cat %s/osd.*.pid`;' % self.pid_dir,
                                     'do', perf_cmd,
                                     'done'])
Esempio n. 17
0
import sys
sys.path.append('./import_scripts')
from common import sh
ls = sh('git log | grep Date')
ls = ls.split('\n')
for x in ls:
    print x

print 'SINCE FIRST COMMIT:'

print '-' * 20
print len(ls), 'commits'
print len(ls) * 2, 'hours, given 2 hours per commit'
ls = [x[0:18] for x in ls]

ls = set(ls)
ls = list(set(ls))

print len(ls), 'days'
print len(ls) * 6, 'hours in 6 hour workdays'

print '-' * 20

print 'SINCE 1 OCTOBER:'

ls = sh('git log --since={2012-10-01} | grep Date')
ls = ls.split('\n')
print '-' * 20
print len(ls), 'commits'
print len(ls) * 2, 'hours, given 2 hours per commit'
ls = [x[0:18] for x in ls]
Esempio n. 18
0
def main():
    """Smoke test with generated per-server config files: write a config
    for each server (sharing one random clusterUUID), bootstrap the first
    server's log, start the cluster, grow it, then run the client on
    localhost until it exits or the timeout expires (which raises).
    """
    arguments = docopt(__doc__)
    client_command = arguments['--client']
    server_command = arguments['--binary']
    num_servers = int(arguments['--servers'])
    reconf_opts = arguments['--reconf']
    # A literal pair of quotes means "no extra options".
    if reconf_opts == "''":
        reconf_opts = ""
    timeout = int(arguments['--timeout'])

    server_ids = range(1, num_servers + 1)
    cluster = "--cluster=%s" % ','.join([h[0] for h in
                                        smokehosts[:num_servers]])
    # Random 8-letter cluster UUID shared by all servers of this run.
    alphabet = [chr(ord('a') + i) for i in range(26)]
    cluster_uuid = ''.join([random.choice(alphabet) for i in range(8)])
    with Sandbox() as sandbox:
        # Fresh storage and debug output.
        sh('rm -rf smoketeststorage/')
        sh('rm -f debug/*')
        sh('mkdir -p debug')

        # Write one config file per server: id/address/uuid first, then the
        # optional shared smoketest.conf appended.
        for server_id in server_ids:
            host = smokehosts[server_id - 1]
            with open('smoketest-%d.conf' % server_id, 'w') as f:
                f.write('serverId = %d\n' % server_id)
                f.write('listenAddresses = %s\n' % host[0])
                f.write('clusterUUID = %s\n' % cluster_uuid)
                f.write('snapshotMinLogSize = 1024')
                f.write('\n\n')
                try:
                    f.write(open('smoketest.conf').read())
                except:
                    # smoketest.conf is optional.
                    pass


        print('Initializing first server\'s log')
        sandbox.rsh(smokehosts[0][0],
                    '%s --bootstrap --config smoketest-%d.conf' %
                    (server_command, server_ids[0]),
                   stderr=open('debug/bootstrap', 'w'))
        print()

        # Launch each server in the background, stderr to debug/<id>.
        for server_id in server_ids:
            host = smokehosts[server_id - 1]
            command = ('%s --config smoketest-%d.conf' %
                       (server_command, server_id))
            print('Starting %s on %s' % (command, host[0]))
            sandbox.rsh(host[0], command, bg=True,
                        stderr=open('debug/%d' % server_id, 'w'))
            sandbox.checkFailures()

        print('Growing cluster')
        sh('build/Examples/Reconfigure %s %s set %s' %
           (cluster,
            reconf_opts,
            ' '.join([h[0] for h in smokehosts[:num_servers]])))

        print('Starting %s %s on localhost' % (client_command, cluster))
        client = sandbox.rsh('localhost',
                             '%s %s' % (client_command, cluster),
                             bg=True,
                             stderr=open('debug/client', 'w'))

        # Poll the client until it finishes, checking for server failures.
        start = time.time()
        while client.proc.returncode is None:
            sandbox.checkFailures()
            time.sleep(.1)
            if time.time() - start > timeout:
                raise Exception('timeout exceeded')
#!/usr/bin/env python
# Replace this machine's apache2 site configuration with symlinks into
# /yerk-data, enable the required modules, and restart apache.
from common import sh

# NOTE(review): the rm below removes 'sites-includes' but the ln creates
# 'site-includes' — confirm which directory name is actually correct.
sh("""

set -x
set -e

rm -rf /etc/apache2/sites-enabled/
rm -rf /etc/apache2/sites-available/
rm -rf /etc/apache2/sites-includes/

ln -s /yerk-data/etc/apache2/sites-enabled/ /etc/apache2/sites-enabled/
ln -s /yerk-data/etc/apache2/sites-available/ /etc/apache2/sites-available/
ln -s /yerk-data/etc/apache2/site-includes/ /etc/apache2/site-includes/

a2enmod headers
a2enmod ssl
a2enmod dav
a2enmod dav_fs
a2enmod proxy
a2enmod proxy_connect

/etc/init.d/apache2 restart
""")
Esempio n. 20
0
                m = re.search('Running for election in term (\d+)', line)
                if m is not None:
                    b['wake'] = int(m.group(1))
        terms = [b['term'] for b in server_beliefs.values()]
        leaders = [b['leader'] for b in server_beliefs.values()]
        if same(terms) and terms[0] > after_term:
            assert same(leaders), server_beliefs
            return {'leader': leaders[0],
                    'term': terms[0],
                    'num_woken': sum([1 for b in server_beliefs.values() if b['wake'] > after_term])}
        else:
            time.sleep(.25)
            sandbox.checkFailures()

with Sandbox() as sandbox:
    # Fresh debug output directory before launching anything.
    sh('rm -f debug/*')
    sh('mkdir -p debug')

    # num_servers and hosts are defined earlier in the file (not shown here).
    server_ids = range(1, num_servers + 1)
    servers = {}  # server_id -> sandbox process handle

    def start(server_id):
        # Launch one LogCabin server on its host, stderr to debug/<id>.
        host = hosts[server_id - 1]
        command = 'build/LogCabin -i %d' % server_id
        print('Starting LogCabin -i %d on %s' % (server_id, host[0]))
        server = sandbox.rsh(host[0], command, bg=True,
                             stderr=open('debug/%d' % server_id, 'w'))
        servers[server_id] = server

    for server_id in server_ids:
        start(server_id)
Esempio n. 21
0
def main():
    """Kill-and-restart smoke test: run a LogCabin cluster, periodically
    kill a random server and relaunch it after a delay, while the client
    commands run against the cluster.

    Exits cleanly once --timeout seconds pass with no failures; any sandbox
    failure raises via checkFailures().
    """
    arguments = docopt(__doc__)
    client_commands = arguments['--client']
    server_command = arguments['--binary']
    num_servers = int(arguments['--servers'])
    reconf_opts = arguments['--reconf']
    # A literal pair of quotes means "no extra options".
    if reconf_opts == "''":
        reconf_opts = ""
    timeout = int(arguments['--timeout'])
    killinterval = int(arguments['--killinterval'])
    launchdelay = int(arguments['--launchdelay'])

    server_ids = range(1, num_servers + 1)
    cluster = "--cluster=%s" % ','.join(
        [h[0] for h in smokehosts[:num_servers]])
    with Sandbox() as sandbox:
        # Fresh storage and debug output.
        sh('rm -rf smoketeststorage/')
        sh('rm -f debug/*')
        sh('mkdir -p debug')

        # Write one config file per server: optional shared smoketest.conf
        # first, then the per-server id/address.
        for server_id in server_ids:
            host = smokehosts[server_id - 1]
            with open('smoketest-%d.conf' % server_id, 'w') as f:
                try:
                    f.write(open('smoketest.conf').read())
                    f.write('\n\n')
                except IOError:
                    # smoketest.conf is optional (was a bare except).
                    pass
                f.write('serverId = %d\n' % server_id)
                f.write('listenAddresses = %s\n' % host[0])

        print('Initializing first server\'s log')
        sandbox.rsh(smokehosts[0][0],
                    '%s --bootstrap --config smoketest-%d.conf' %
                    (server_command, server_ids[0]),
                    stderr=open('debug/bootstrap', 'w'))
        print()

        processes = {}  # server_id -> sandbox process handle

        def launch_server(server_id):
            # Start one server in the background and record its handle.
            host = smokehosts[server_id - 1]
            command = ('%s --config smoketest-%d.conf -l %s' %
                       (server_command, server_id, 'debug/%d' % server_id))
            print('Starting %s on %s' % (command, host[0]))
            processes[server_id] = sandbox.rsh(host[0], command, bg=True)
            sandbox.checkFailures()

        for server_id in server_ids:
            launch_server(server_id)

        print('Growing cluster')
        sh('build/Examples/Reconfigure %s %s set %s' %
           (cluster, reconf_opts, ' '.join(
               [h[0] for h in smokehosts[:num_servers]])))

        for i, client_command in enumerate(client_commands):
            print('Starting %s %s on localhost' % (client_command, cluster))
            sandbox.rsh('localhost',
                        '%s %s' % (client_command, cluster),
                        bg=True,
                        stderr=open('debug/client%d' % i, 'w'))

        start = time.time()
        lastkill = start
        tolaunch = []  # [(time to launch, server id)]
        while True:
            time.sleep(.1)
            sandbox.checkFailures()
            now = time.time()
            if now - start > timeout:
                print('Timeout met with no errors')
                break
            if now - lastkill > killinterval:
                # Bug fix: random.choice needs a sequence; dict.keys() is a
                # view on Python 3 and raised there.
                server_id = random.choice(list(processes.keys()))
                print('Killing server %d' % server_id)
                sandbox.kill(processes[server_id])
                del processes[server_id]
                lastkill = now
                tolaunch.append((now + launchdelay, server_id))
            # Relaunch any killed servers whose delay has expired.
            while tolaunch and now > tolaunch[0][0]:
                launch_server(tolaunch.pop(0)[1])
Esempio n. 22
0
def main():
    """Benchmark driver: optionally bootstrap and grow a LogCabin cluster
    on the smoke hosts (--growing), then run the latency/throughput test.

    --sharedfs indicates the hosts share a filesystem, so config files need
    not be copied to each host individually.
    """
    arguments = docopt(__doc__)

    growing=False
    if arguments['--growing']:
        growing=True

    sharedfs=False
    if arguments['--sharedfs']:
        sharedfs=True

    client_command = 'build/Examples/Benchmark'
    server_command = 'build/LogCabin'

    num_servers = len(smokehosts)


    server_ids = range(1, num_servers + 1)
    # Note: h[1] (the second host field) is used for the cluster string here.
    cluster = "--cluster=%s" % ','.join([h[1] for h in
                                        smokehosts[:num_servers]])

    with Sandbox() as sandbox:
        # Fresh debug output.
        sh('rm -rf debug/')
        sh('mkdir -p debug')
        if growing:
            # Write one config file per server: id/address/storage first,
            # then the optional shared benchtest.conf appended.
            for server_id in server_ids:
                host = smokehosts[server_id - 1]
                with open('benchtest-%d.conf' % server_id, 'w') as f:
                    f.write('serverId = %d\n' % server_id)
                    f.write('listenAddresses = %s\n' % host[1])
                    f.write('storagePath = %s'
                            %os.path.join(os.getcwd(),'teststorage/'))
                    f.write('\n\n')
                    try:
                        f.write(open('benchtest.conf').read())
                    except:
                        # benchtest.conf is optional.
                        pass

            if not sharedfs:
                # Each host has its own filesystem: ship the config and
                # clear old storage remotely.
                print('Copying script files to remote servers',
                      'and removing previous storage')
                for server_id in server_ids:
                    host = smokehosts[server_id - 1]
                    sh('scp benchtest-%d.conf %s:%s'
                       %(server_id, host[0],os.getcwd()))
                    sandbox.rsh(host[0], 'rm -rf teststorage')
                print()
            else:
                sh('rm -rf teststorage')


            print('Initializing first server\'s log')
            sandbox.rsh(smokehosts[0][0],
                        '%s --bootstrap --config benchtest-%d.conf' %
                        (server_command, server_ids[0]),
                        stderr=open('debug/bootstrap_err', 'w'))
            print()

            # Launch each server in the background, stderr to debug/<id>err.
            for server_id in server_ids:
                host = smokehosts[server_id - 1]
                command = ('%s --config benchtest-%d.conf' %
                           (server_command, server_id))
                print('Starting %s on %s' % (command, host[1]))
                sandbox.rsh(host[0], command, bg=True,
                            stderr=open('debug/%derr' % server_id, 'w'))
                sandbox.checkFailures()


            print('Growing cluster')
            #Carreful at verbosity here
            sh('build/Examples/Reconfigure --verbosity=ERROR %s set %s' %
               (cluster,
                ' '.join([h[1] for h in smokehosts[:num_servers]])))


        lat_tp_test(client_command, cluster, num_servers)
Esempio n. 23
0
def lat_tp_test(client_command, cluster, num_servers):
    """Run a throughput-vs-latency sweep against the cluster.

    For each inter-request wait in ``waits`` a many-threaded throughput
    client is started in the background (its output appended to ``rps_res``)
    while a single-threaded latency client runs in the foreground (output
    appended to ``lat_res``).  The background client is killed between
    iterations.

    :param client_command: path to the benchmark client binary.
    :param cluster: ``--cluster=...`` argument string passed to the client.
    :param num_servers: cluster size; used only to name the result files.
    """
    # Result files are suffixed with the cluster size so different runs do
    # not clobber each other.  NOTE(review): hard-coded absolute paths —
    # assumes this specific machine layout.
    rps_res = "/home/raho/logcabin/results/rps_res%d"%num_servers
    lat_res = "/home/raho/logcabin/results/lat_res%d"%num_servers

    # Inter-request wait times (microseconds) to sweep over.
    waits = [0, 2500, 5000, 10000, 25000, 50000, 75000, 100000]
    #[0, 50, 100, 250, 500, 1000, 5000, 10000, 20000, 30000]    
    opts = "--timeout=0 --writes=1600000 --size=1024 --threads=250"
    lat_opts = "--writes=100000 --size=1024 --wait=500000 --threads=1"

    binary = os.path.basename(client_command) 
    sh('echo start >%s'%rps_res)
    sh('echo start >%s'%lat_res)
    for w in waits:
        # to be determined better
        timeout = 15

        print('wait with %dus'%w)
        #Run all threads for throughput
        sh('echo wait with %dus >>%s'%(w,rps_res))
        sh(('%s %s --wait=%d %s 1>>%s 2>/dev/null &'
            % (client_command, opts, w, cluster,
               rps_res
            )))
        
        time.sleep(1)
        sh('echo wait with %dus >>%s'%(w,lat_res))
        sh(('%s %s --timeout=%d %s  1>>%s 2>/dev/null'
            % (client_command, lat_opts, timeout, cluster,
              lat_res
            )))

        # Stop the background throughput client before the next iteration.
        sh(('killall %s'% binary))
        # BUG FIX: the original used ``w is not waits[-1]`` — an identity
        # test that only happens to work for small interned ints; compare
        # by value instead.
        if w != waits[-1]:
            print('sleeping..')
            time.sleep(6)
        
    sh('echo end >>%s'%rps_res)
    sh('echo end >>%s'%lat_res)
Esempio n. 24
0
def fetchAndSaveNewEvents():
    """Fetch new GitHub repo events and persist them as pending ConfigMaps.

    Loads the last-seen ETag/event id from the ``event-cursor`` ConfigMap
    (treating any failure as a first run), pages through the repo's
    ``/events`` API until a previously-seen event is reached, stores each new
    event accepted by some handler's filter as its own ConfigMap, and finally
    writes the updated cursor back.
    """
    # todo: respect the x-poll-interval header
    # first run don't save anything, second run, even if no id saved previously, save all
    try:
        resource = json.loads(
            sh(f"kubectl -n {env.CD_NAMESPACE} get ConfigMap {getFullName('event-cursor')} -ojson"
               ))
    # BUG FIX: was a bare ``except:`` which would also swallow SystemExit /
    # KeyboardInterrupt; any ordinary failure here still means "first run".
    except Exception:
        firstRun = True
        fetchedETag = '"none"'
        fetchedEventID = 0
        # (typo "assumiing" fixed)
        print("Can't retrieve config, assuming first run.")
    else:
        firstRun = False
        fetchedETag = resource['data']['ETag']
        fetchedEventID = int(resource['data']['eventID'])

    newETag = fetchedETag
    newEventID = fetchedEventID

    url = env.CD_REPO_API_URL + '/events'
    events = []
    for i in range(15):  # Current GH limit is 10 pages
        if i > 10:
            raise Exception(
                "Github only provides 10 pages of results, something's wrong with pagination."
            )

        # Send the saved ETag only on the first page of a non-first run so
        # GitHub can answer 304 when nothing changed.
        resp = http.request(
            'GET',
            url,
            headers=dict(http.headers, **{'If-None-Match': fetchedETag})
            if i == 0 and not firstRun else http.headers)

        if resp.status == 304:
            print("Fetching events 304 (nothing new)")
            return
        else:
            checkResponse(resp)

        print(f"Processing page {i+1}")

        if i == 0 and 'ETag' in resp.headers:
            newETag = resp.headers['ETag']

        tmpEvents = json.loads(resp.data.decode('utf-8'))
        tmpEventsFiltered = [
            e for e in tmpEvents if int(e['id']) > fetchedEventID
        ]
        events += tmpEventsFiltered
        if len(tmpEvents) != len(tmpEventsFiltered):
            break  # already reached event we saved last

        if firstRun:
            break  #during first run only want latest id

        # Follow the pagination 'Link' header to the next page, if any.
        linkKey = '; rel="next"'
        if 'Link' in resp.headers and linkKey in resp.headers['Link']:
            url = resp.headers['Link'].split(linkKey)[0].split(
                ',').pop().strip('< >')
        else:
            break

    if events:
        newEventID = int(
            events[0]['id']
        )  # may be not an event that we saved, we save only filtered ones we're listening for
    else:
        print('Interesting, no events at all!')  # new repo or old repo?

    # only save events on subsequent runs
    if not firstRun:
        # get rid of duplicates and filter
        eventDict = dict((e['id'], e) for e in events if any(
            handler.filterFn(e['payload'])
            for handler in dispatchTable[e['type']]))

        if eventDict:
            # use apply in case this command worked but saving cursor failed, resulting in resave
            sh(f'kubectl -n {env.CD_NAMESPACE} apply -f-',
               input=json.dumps(dict(
                   kubeList,
                   items=[
                       dict(kubeConfigMap,
                            metadata={
                                'name': getFullName(e['id']),
                                'labels': {
                                    'owner': 'quickcd',
                                    'kind': 'GitHubEvent',
                                    'org': env.CD_GITHUB_ORG_NAME,
                                    'repo': env.CD_GITHUB_REPO_NAME,
                                    'status': 'pending'
                                }
                            },
                            data={
                                'event':
                                json.dumps(e,
                                           ensure_ascii=False,
                                           allow_nan=False)
                            }) for e in eventDict.values()
                   ]),
                                ensure_ascii=False,
                                allow_nan=False))

    # below we use create for first run to make sure we don't accidentally override a config that existed but failed to load above
    sh(f"kubectl -n {env.CD_NAMESPACE} {'create --save-config' if firstRun else 'apply'} -f-",
       input=json.dumps(dict(kubeConfigMap,
                             metadata={
                                 'name': getFullName('event-cursor'),
                                 'labels': {
                                     'owner': 'quickcd'
                                 }
                             },
                             data={
                                 'eventID': str(newEventID),
                                 'ETag': newETag
                             }),
                        ensure_ascii=False,
                        allow_nan=False))
Esempio n. 25
0
def main():
    """Boot a LogCabin cluster and run the smoke-test client against it.

    Parses command-line options, writes one config file per server (with a
    shared random cluster UUID), bootstraps the first server's log, starts
    every server inside the sandbox, grows the cluster to full membership,
    then runs the client until it exits.

    Raises:
        Exception: if the client is still running after ``--timeout`` seconds.
    """
    arguments = docopt(__doc__)
    client_command = arguments['--client']
    server_command = arguments['--binary']
    num_servers = int(arguments['--servers'])
    reconf_opts = arguments['--reconf']
    if reconf_opts == "''":
        reconf_opts = ""
    timeout = int(arguments['--timeout'])

    server_ids = range(1, num_servers + 1)
    cluster = "--cluster=%s" % ','.join(
        [h[0] for h in smokehosts[:num_servers]])
    # Random 8-letter UUID shared by all server configs of this run.
    alphabet = [chr(ord('a') + i) for i in range(26)]
    cluster_uuid = ''.join([random.choice(alphabet) for i in range(8)])
    with Sandbox() as sandbox:
        # Clean out state from any previous run.
        sh('rm -rf smoketeststorage/')
        sh('rm -f debug/*')
        sh('mkdir -p debug')

        for server_id in server_ids:
            host = smokehosts[server_id - 1]
            with open('smoketest-%d.conf' % server_id, 'w') as f:
                f.write('serverId = %d\n' % server_id)
                f.write('listenAddresses = %s\n' % host[0])
                f.write('clusterUUID = %s\n' % cluster_uuid)
                f.write('snapshotMinLogSize = 1024')
                f.write('\n\n')
                # Append optional user-provided extra config; a missing file
                # is fine.  BUG FIX: was a bare ``except:`` that swallowed
                # everything and leaked the file handle; now only an
                # unreadable/missing file is ignored and the handle is closed.
                try:
                    with open('smoketest.conf') as extra:
                        f.write(extra.read())
                except IOError:
                    pass

        print('Initializing first server\'s log')
        sandbox.rsh(smokehosts[0][0],
                    '%s --bootstrap --config smoketest-%d.conf' %
                    (server_command, server_ids[0]),
                    stderr=open('debug/bootstrap', 'w'))
        print()

        for server_id in server_ids:
            host = smokehosts[server_id - 1]
            command = ('%s --config smoketest-%d.conf' %
                       (server_command, server_id))
            print('Starting %s on %s' % (command, host[0]))
            sandbox.rsh(host[0],
                        command,
                        bg=True,
                        stderr=open('debug/%d' % server_id, 'w'))
            sandbox.checkFailures()

        print('Growing cluster')
        sh('build/Examples/Reconfigure %s %s set %s' %
           (cluster, reconf_opts, ' '.join(
               [h[0] for h in smokehosts[:num_servers]])))

        print('Starting %s %s on localhost' % (client_command, cluster))
        client = sandbox.rsh('localhost',
                             '%s %s' % (client_command, cluster),
                             bg=True,
                             stderr=open('debug/client', 'w'))

        # Poll until the client process exits or the timeout elapses.
        start = time.time()
        while client.proc.returncode is None:
            sandbox.checkFailures()
            time.sleep(.1)
            if time.time() - start > timeout:
                raise Exception('timeout exceeded')
Esempio n. 26
0
                leaders[0],
                'term':
                terms[0],
                'num_woken':
                sum([
                    1 for b in server_beliefs.values()
                    if b['wake'] > after_term
                ])
            }
        else:
            time.sleep(.25)
            sandbox.checkFailures()


# Top-level driver: run the cluster inside a sandbox so spawned server
# processes are cleaned up when the script exits.
with Sandbox() as sandbox:
    # Start from a clean debug directory so this run's logs are isolated.
    sh('rm -f debug/*')
    sh('mkdir -p debug')

    # Server ids are 1-based; hosts[i-1] is the machine for server i.
    server_ids = range(1, num_servers + 1)
    servers = {}  # server_id -> sandbox process handle

    def start(server_id):
        # Launch LogCabin server `server_id` in the background on its host,
        # capturing its stderr under debug/, and record the handle so it can
        # be checked or killed later.
        host = hosts[server_id - 1]
        command = 'build/LogCabin -i %d' % server_id
        print('Starting LogCabin -i %d on %s' % (server_id, host[0]))
        server = sandbox.rsh(host[0],
                             command,
                             bg=True,
                             stderr=open('debug/%d' % server_id, 'w'))
        servers[server_id] = server
Esempio n. 27
0
def main():
    """Boot a LogCabin cluster and run the benchmark client repeatedly.

    Writes one config per server, copies the configs to the remote hosts and
    clears their storage (skipped with ``--sharedfs``), bootstraps the first
    server's log, starts every server, grows the cluster, then runs the
    client ``--it`` times on localhost.
    """
    arguments = docopt(__doc__)
    client_command = arguments['--client']
    server_command = arguments['--binary']
    num_servers = int(arguments['--servers'])
    reconf_opts = arguments['--reconf']
    if reconf_opts == "''":
        reconf_opts = ""
    client_opts = arguments['--clientops']
    if client_opts == "''":
        client_opts = ""

    it_num = int(arguments['--it'])
    # Clamp the requested cluster size to the hosts listed in the config.
    max_ns = len(smokehosts)
    if max_ns < num_servers:
        print('Number of servers in config file: %s\n'%max_ns)
        num_servers = max_ns
    del max_ns

    # With a shared filesystem there is nothing to copy to the remote hosts.
    sharedfs = bool(arguments['--sharedfs'])

    server_ids = range(1, num_servers + 1)
    cluster = "--cluster=%s" % ','.join([h[1] for h in
                                        smokehosts[:num_servers]])
    with Sandbox() as sandbox:
        sh('rm -rf debug/')
        sh('mkdir -p debug')
        sh('echo %s > debug/bench_cluster'%cluster)

        for server_id in server_ids:
            host = smokehosts[server_id - 1]
            with open('smoketest-%d.conf' % server_id, 'w') as f:
                f.write('serverId = %d\n' % server_id)
                f.write('listenAddresses = %s\n' % host[1])
                f.write('storagePath = %s'
                        %os.path.join(os.getcwd(),'teststorage/'))
                f.write('\n\n')
                # Append optional extra config; a missing smoketest.conf is
                # fine.  BUG FIX: was a bare ``except:`` that swallowed
                # everything and leaked the file handle.
                try:
                    with open('smoketest.conf') as extra:
                        f.write(extra.read())
                except IOError:
                    pass

        if not sharedfs:
            print('Copying script files to remote servers',
                  'and removing previous storage')
            for server_id in server_ids:
                host = smokehosts[server_id - 1]
                sh('scp smoketest-%d.conf %s:%s'
                   %(server_id, host[0],os.getcwd()))
                sandbox.rsh(host[0], 'rm -rf teststorage')
            print()
        else:
            sh('rm -rf teststorage')

        print('Initializing first server\'s log')
        sandbox.rsh(smokehosts[0][0],
                    '%s --bootstrap --config smoketest-%d.conf' %
                    (server_command, server_ids[0]),
                    stderr=open('debug/bootstrap_err', 'w'))
        print()

        for server_id in server_ids:
            host = smokehosts[server_id - 1]
            command = ('%s --config smoketest-%d.conf' %
                       (server_command, server_id))
            print('Starting %s on %s' % (command, host[1]))
            sandbox.rsh(host[0], command, bg=True,
                        stderr=open('debug/%derr' % server_id, 'w'))
            sandbox.checkFailures()

        print('Growing cluster')
        # Careful with verbosity here
        sh('build/Examples/Reconfigure --verbosity=ERROR %s %s set %s' %
           (cluster,
            reconf_opts,
            ' '.join([h[1] for h in smokehosts[:num_servers]])))

        print('Starting %s %s on localhost over %d iterations'
              % (client_command, cluster, it_num))
        print('\n')

        for i in range(0,it_num):
            # small hack to redirect results to stderr for clean files
            sh(('%s %s %s 1>&2 2>/dev/null'
                % (client_command, client_opts, cluster)))
Esempio n. 28
0
                m = re.search('Running for election in term (\d+)', line)
                if m is not None:
                    b['wake'] = int(m.group(1))
        terms = [b['term'] for b in server_beliefs.values()]
        leaders = [b['leader'] for b in server_beliefs.values()]
        if same(terms) and terms[0] > after_term:
            assert same(leaders), server_beliefs
            return {'leader': leaders[0],
                    'term': terms[0],
                    'num_woken': sum([1 for b in server_beliefs.values() if b['wake'] > after_term])}
        else:
            time.sleep(.25)
            sandbox.checkFailures()

# Top-level driver: run the cluster inside a sandbox so spawned server
# processes are cleaned up when the script exits.
with Sandbox() as sandbox:
    # Start from a clean debug directory so this run's logs are isolated.
    sh('rm -f debug/*')

    # Server ids are 1-based; hosts[i-1] is the machine for server i.
    server_ids = range(1, num_servers + 1)
    servers = {}  # server_id -> sandbox process handle

    def start(server_id):
        # Launch LogCabin server `server_id` in the background on its host,
        # capturing its stderr under debug/, and record the handle.
        host = hosts[server_id - 1]
        command = 'build/LogCabin -i %d' % server_id
        print('Starting LogCabin -i %d on %s' % (server_id, host[0]))
        server = sandbox.rsh(host[0], command, bg=True,
                             stderr=open('debug/%d' % server_id, 'w'))
        servers[server_id] = server

    # Bring up the whole cluster.
    for server_id in server_ids:
        start(server_id)
Esempio n. 29
0
def main():
    """Boot a LogCabin cluster, then repeatedly kill and relaunch random
    servers while a client runs, to exercise failover.

    Every ``--killinterval`` seconds one running server is killed and
    scheduled for relaunch ``--launchdelay`` seconds later; the test passes
    if no failure is detected before ``--timeout`` seconds elapse.
    """
    arguments = docopt(__doc__)
    client_command = arguments['--client']
    server_command = arguments['--binary']
    num_servers = int(arguments['--servers'])
    reconf_opts = arguments['--reconf']
    if reconf_opts == "''":
        reconf_opts = ""
    timeout = int(arguments['--timeout'])
    killinterval = int(arguments['--killinterval'])
    launchdelay = int(arguments['--launchdelay'])

    server_ids = range(1, num_servers + 1)
    cluster = "--cluster=%s" % ','.join([h[0] for h in
                                        smokehosts[:num_servers]])
    with Sandbox() as sandbox:
        # Clean out state from any previous run.
        sh('rm -rf smoketeststorage/')
        sh('rm -f debug/*')
        sh('mkdir -p debug')

        for server_id in server_ids:
            host = smokehosts[server_id - 1]
            with open('smoketest-%d.conf' % server_id, 'w') as f:
                # Optional user config first, then the per-server settings.
                # NOTE(review): the bare except silently ignores a missing
                # smoketest.conf but also any other error, and the inner
                # open() handle is never closed — consider `except IOError`
                # with a `with` block.
                try:
                    f.write(open('smoketest.conf').read())
                    f.write('\n\n')
                except:
                    pass
                f.write('serverId = %d\n' % server_id)
                f.write('listenAddresses = %s\n' % host[0])


        print('Initializing first server\'s log')
        sandbox.rsh(smokehosts[0][0],
                    '%s --bootstrap --config smoketest-%d.conf' %
                    (server_command, server_ids[0]),
                   stderr=open('debug/bootstrap', 'w'))
        print()

        # server_id -> sandbox process handle of currently-running servers.
        processes = {}

        def launch_server(server_id):
            # Start one server in the background, logging (-l) to debug/<id>.
            host = smokehosts[server_id - 1]
            command = ('%s --config smoketest-%d.conf -l %s' %
                       (server_command, server_id, 'debug/%d' % server_id))
            print('Starting %s on %s' % (command, host[0]))
            processes[server_id] = sandbox.rsh(
                host[0], command, bg=True)
            sandbox.checkFailures()

        for server_id in server_ids:
            launch_server(server_id)

        print('Growing cluster')
        sh('build/Examples/Reconfigure %s %s set %s' %
           (cluster,
            reconf_opts,
            ' '.join([h[0] for h in smokehosts[:num_servers]])))

        print('Starting %s %s on localhost' % (client_command, cluster))
        client = sandbox.rsh('localhost',
                             '%s %s' % (client_command, cluster),
                             bg=True,
                             stderr=open('debug/client', 'w'))

        start = time.time()
        lastkill = start
        tolaunch = [] # [(time to launch, server id)]
        while True:
            time.sleep(.1)
            sandbox.checkFailures()
            now = time.time()
            if now - start > timeout:
                print('Timeout met with no errors')
                break
            if now - lastkill > killinterval:
                # Kill a random live server.
                # NOTE(review): random.choice(processes.keys()) only works on
                # Python 2; on Python 3 dict views are not indexable — wrap
                # in list() if this script targets Python 3.
                server_id = random.choice(processes.keys())
                print('Killing server %d' % server_id)
                sandbox.kill(processes[server_id])
                del processes[server_id]
                lastkill = now
                tolaunch.append((now + launchdelay, server_id))
            # Relaunch any killed servers whose delay has expired.
            while tolaunch and now > tolaunch[0][0]:
                launch_server(tolaunch.pop(0)[1])