Example 1
def runMain():

    if utils.inputMatch([]):
        utils.print_header('expe')
        utils.print_option("step1", "launch cluster, and cut image")
        utils.print_option("step2", "deploy image on hdfs")
        utils.print_option("step3", "get status of image")
        utils.print_option("step4", "produce L2")
        utils.print_option("step5", "view produced image")
        utils.print_option("view", "view distributed")
        utils.print_option("clean", "clear hdfs ")
        utils.print_option("full", "run all")
        utils.print_option("run-perou", "run perou")
        utils.print_option("view-perou", "view perou")
        utils.print_option("full-chine", "cluster -> china")
        utils.print_option("run-chine", "run china")
        utils.print_option("deep-restore", "restore deep models")
        utils.print_option("deep-sync", "restore deep models")
        utils.print_option("deep-download", "restore deep models")
        utils.print_header('')

    if utils.inputMatch(['deep-restore']):
        restoreDeep()

    if utils.inputMatch(['deep-sync']):
        syncDeep()

    if utils.inputMatch(['deep-download']):
        downloadDeep()

    if utils.inputMatch(['deep-retrosync']):
        retrosyncDeep()

    if utils.inputMatch(['deep-tar']):
        uploadDeep()

    if utils.inputMatch(['run-perou']):
        runBigPerou()

    if utils.inputMatch(['view-perou']):
        viewPerou()

    if utils.inputMatch(['view']):
        viewProductionDistributed()

    if utils.inputMatch(['clean']):
        cleanHdfs()

    if utils.inputMatch(['full']):
        full()

    if utils.inputMatch(['full-chine']):
        fullChine()

    if utils.inputMatch(['step1']):
        main.setupFull()
        additionPython()
        cutBrisbane()

    if utils.inputMatch(['step1b']):
        cutBrisbane()

    if utils.inputMatch(['step2']):
        storeBrisbaneOnHdfs()

    if utils.inputMatch(['step3']):
        getFileStatus()

    if utils.inputMatch(['step4']):
        runProduction()

    if utils.inputMatch(['step5']):
        viewProduction()

    if utils.inputMatch(['kill']):
        cluster.instanceKillAll()
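
Every example in this collection dispatches on sys.argv through a shared utils module that is never shown. To make the pattern concrete, here is a minimal sketch of what inputMatch, print_header, and print_option might look like; the names and call signatures come from the examples themselves, but the bodies are assumptions, not the project's actual implementation:

import sys

def print_header(title):
    # Assumed: frame a section of the help menu.
    print('----- {0} -----'.format(title))

def print_option(name, description, hassub=False):
    # Assumed: one help line per command; hassub marks commands that
    # print a submenu of their own when invoked without further arguments.
    marker = '+' if hassub else ' '
    print('  {0} {1:<24} {2}'.format(marker, name, description))

def inputMatch(tokens, doPartial=False):
    # Assumed: compare the arguments after the script name against the
    # expected token list. With doPartial=True, extra trailing arguments
    # are tolerated (they become the command's own parameters).
    args = sys.argv[1:]
    if doPartial:
        return args[:len(tokens)] == tokens
    return args == tokens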
Example 2
def main():

    if utils.inputMatch([]):
        utils.print_header('hadoop')
        utils.print_option("copy", "setup files (copy jar files)")
        utils.print_option("setup", "setup configuration (which IPs)")
        utils.print_option("bashrc", "add startup to bashrc")
        utils.print_option("format", "format hadoop")
        utils.print_option("start-dfs", "run start-dfs")
        utils.print_option("stop-dfs", "run stop-dfs")
        utils.print_option("status", "get status")
        utils.print_option(
            "full", "run full setup (copy->setup->bashrc->format->start-dfs)")
        utils.print_header('')

    if utils.inputMatch(['copy']):
        setupSoftFiles()

    if utils.inputMatch(['setup']):
        setupConfigurationFiles()

    if utils.inputMatch(['bashrc']):
        setupBashrc()

    if utils.inputMatch(['format']):
        formatFileSystem()

    if utils.inputMatch(['start-dfs']):
        startDfs()

    if utils.inputMatch(['stop-dfs']):
        stopDfs()

    if utils.inputMatch(['status']):
        getStatus()

    if utils.inputMatch(['full']):
        setupFullWithFormat()
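
Under that convention this module behaves as a small command-line tool: with no arguments it prints the menu, and each recognised first argument runs one setup step. For instance (the script name is an assumption):

    python hadoop.py              # no arguments: print the option menu
    python hadoop.py format       # run formatFileSystem()
    python hadoop.py full         # run setupFullWithFormat(): copy->setup->bashrc->format->start-dfs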
Example 3
def main():

    if utils.inputMatch([]):
        utils.print_header('package')
        utils.print_option(
            "restore", "copy delivery from common data storage to each local disk")
        utils.print_option(
            "postinstallscript", "run postinstallscript scripts")
        utils.print_option("bashrc", "add startup to bashrc")
        utils.print_option("profile", "add startup to etc/profile.d")
        utils.print_option("sync", "upload new code")
        utils.print_option("full", "run full setup")
        utils.print_header('')

    if utils.inputMatch(['restore']):
        restoreFromStore()

    if utils.inputMatch(['postinstallscript']):
        setupPostinstall()

    if utils.inputMatch(['full']):
        setupFull()

    if utils.inputMatch(['sync']):
        synchronize()

    if utils.inputMatch(['bashrc']):
        setupBashrc()

    if utils.inputMatch(['profile']):
        setupProfile()

    if utils.inputMatch(['addjars']):
        addjars()
Example 4
def runMain():

    if utils.inputMatch([]):
        utils.print_header('deep')

        utils.print_option("full", "cluster->restore->download deep eval")
        utils.print_option("restore-eval", "restore deep models evaluations")
        utils.print_option("upload-eval", "tar and upload deep models evaluations")

        utils.print_option("restore-model", "restore deep models (download from bucket and untar)")
        utils.print_option("eval-tomaster", "copy evals from slaves to master")
        utils.print_option("eval-frommaster", "copy evals from master to slaves")
        utils.print_option("eval-permanent", "copy evals from slaves to master")
            
        utils.print_option("eval-sync", "eval-tomaster->eval-frommaster")
        
        utils.print_header('')

    if utils.inputMatch(['full']):
        full()

    if utils.inputMatch(['restore-eval']):
        restoreDeepEval()

    if utils.inputMatch(['restore-model']):
        restoreDeepModels()

    if utils.inputMatch(['eval-tomaster']):
        syncEvalToMaster()

    if utils.inputMatch(['eval-frommaster']):
        syncEvalFromMaster()

    if utils.inputMatch(['upload-eval']):
        uploadDeepEval()

    if utils.inputMatch(['eval-permanent']):
        permanentEvalToMaster()

    if utils.inputMatch(['upload-subdirs'], doPartial=True):
        host = sys.argv[2]
        source = sys.argv[3]
        prefix = sys.argv[4]
        suffix = sys.argv[5]
        utils.ssh(cluster.ipGetAny(host), 'cd {0};ls -1 -d */ > listdir.txt'.format(source))
        utils.exec_command('gcloud compute copy-files ' + host + ':' + os.path.join(source, 'listdir.txt') + ' .')

        with open("listdir.txt", "r") as myfile:
            data = myfile.readlines()

        utils.exec_command('rm listdir.txt')
        data = [os.path.dirname(d.rstrip()) for d in data]

        command = getUploadCommand(source, data, prefix, suffix)

        print(command)
        utils.ssh(cluster.ipGetAny(host), command)
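
getUploadCommand is called here but not defined in the excerpt. Judging only from its call site (the source directory, the list of subdirectory names read back from listdir.txt, and a prefix/suffix pair), one plausible shape is a shell command string that archives each subdirectory and uploads it. This is a hypothetical sketch, including the gs:// destination, not the project's real helper:

def getUploadCommand(source, subdirs, prefix, suffix):
    # Hypothetical: tar each subdirectory of `source` and upload the
    # archive as <prefix><name><suffix>.tar.gz. The bucket is a placeholder.
    parts = ['cd {0}'.format(source)]
    for name in subdirs:
        archive = '{0}{1}{2}.tar.gz'.format(prefix, name, suffix)
        parts.append('tar czf {0} {1}'.format(archive, name))
        parts.append('gsutil cp {0} gs://BUCKET/{0}'.format(archive))
    return ';'.join(parts)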
Example 5
def main():

    if utils.inputMatch([]):
        utils.print_header('spark')
        utils.print_option("copy", "setup files (copy jar files)")
        utils.print_option("setup", "setup configuration (which IPs)")
        utils.print_option("bashrc", "add startup to bashrc")
        utils.print_option("start-all", "run start-dfs")
        utils.print_option("launchui", "display ui")
        utils.print_option(
            "full", "run full setup (copy->setup->bashrc->start-all)")
        utils.print_header('')

    if utils.inputMatch(['copy']):
        setupSoftFiles()

    if utils.inputMatch(['setup']):
        setupConfigurationFiles()

    if utils.inputMatch(['restart']):
        restart()

    if utils.inputMatch(['bashrc']):
        setupBashrc()

    if utils.inputMatch(['start-all']):
        startAll()

    if utils.inputMatch(['launchui']):
        launchUi()

    if utils.inputMatch(['full']):
        setupFull()
Example 6
def main(prefix=''):

    if utils.inputMatch([]):

        utils.print_header("Cluster")
        utils.print_option("full", "launch [regular]-> keys")
        utils.print_option("launch", "launch cluster with various setup", True)
        utils.print_option("destroy/kill", "kill all instances")
        utils.print_option("list", "list all instances, using cache")
        utils.print_option(
            "list-clean", "list all instances after cleaning cache")
        utils.print_option("keys", "setup ssh keys on full cluster")
        utils.print_option("disks", "handle disk creation and deletion", True)
        utils.print_option(
            "rsync", "shortcuts to various synchronisation commands", True)
        utils.print_header("")
        utils.print_option("tunnel", "create a tunnel for connection")
        utils.print_option("network", "open network 8080")
        utils.print_option(
            "runall", "[runall command] run a command on every computer")

    if utils.inputMatch(['launch']):
        utils.print_header("Cluster Launch")
        utils.print_option(
            "launch regular", "launch cluster with localdata and commondata (ro)")
        utils.print_option(
            "launch naked", "launch naked cluster (no common, no local)")
        utils.print_option(
            "launch master-commonwritable", "launch master with common writable")
        utils.print_option(
            "launch master-naked", "launch master without stuff")
        utils.print_option(
            "launch commondata-only", "launch cluster with common (ro), but no local storage")
        utils.print_header("")

    if utils.inputMatch(['launch', 'regular']):
        mainLaunchRegular()

    if utils.inputMatch(['launch', 'naked']):
        mainLaunchNaked()

    if utils.inputMatch(['launch', 'commondata-only']):
        mainLaunchCommonDataOnly()

    if utils.inputMatch(['launch', 'master-commonwritable']):
        mainLaunchMasterCommonWritable()

    if utils.inputMatch(['launch', 'master-naked']):
        mainLaunchMasterNaked()

    if utils.inputMatch(['disks']):
        utils.print_header("disk handling")
        utils.print_option("disks create", "create data disks")
        utils.print_option("disks delete", "delete data disks")
        utils.print_header("")

    if utils.inputMatch(['disks', 'create']):
        clusterCreateDiskLocalData()

    if utils.inputMatch(['disks', 'delete']):
        clusterDeleteDiskLocalData()

    if utils.inputMatch(['list']):
        utils.print_header("INSTANCES")
        (listnodes, nodes) = instanceListAll()
        print(listnodes)
        print(nodes)

    if utils.inputMatch(['network']):
        networkOpen()

    if utils.inputMatch(['tunnel']):
        tunnel()

    if utils.inputMatch(['screenrc']):
        setScreenRc()

    if utils.inputMatch(['list-clean']):
        utils.print_header("Cleaning instance list ...")
        instanceListClean()
        utils.print_header("INSTANCES")

        (listnodes, nodes) = instanceListAll()
        print(listnodes)
        print(nodes)

    if utils.inputMatch(['keys']):
        sshKeysDeploy()

    if utils.inputMatch(['kill']) or utils.inputMatch(['destroy']):
        instanceKillAll()

    if utils.inputMatch(['rsync'], doPartial=False):
        utils.print_header("Cluster rsync")
        utils.print_option(
            "rsync to-master", "[sourceDir targetDir option1 option2] For synchronisation to master only")

        utils.print_option(
            "rsync from-master", "[sourceDir targetDir option1 option2] For synchronisation from master only")

        utils.print_option(
            "rsync all", "[sourceDir targetDir] For synchronisation on all nodes")

        utils.print_option(
            "rsync to-any", "[hostname sourceDir targetDir option1 option2] For synchronisation to hostname only")

        utils.print_option(
            "rsync from-any", "[hostname sourceDir targetDir option1 option2] For synchronisation from hostname only")

    if utils.inputMatch(['rsync', 'to-master'], doPartial=True):
        source = sys.argv[3]
        target = sys.argv[4]

        opts = sys.argv[5:]

        utils.rsyncLocalhostToOther(ipGetMaster(), source, target, opts)

    if utils.inputMatch(['rsync', 'from-master'], doPartial=True):
        source = sys.argv[3]
        target = sys.argv[4]

        opts = sys.argv[5:]

        utils.rsyncOtherToLocalhost(ipGetMaster(), source, target, opts)

    if utils.inputMatch(['rsync', 'to-any'], doPartial=True):
        host = sys.argv[3]
        source = sys.argv[4]
        target = sys.argv[5]

        opts = sys.argv[6:]

        utils.rsyncLocalhostToOther(ipGetAny(host), source, target, opts)

    if utils.inputMatch(['rsync', 'from-any'], doPartial=True):
        host = sys.argv[3]
        source = sys.argv[4]
        target = sys.argv[5]

        opts = sys.argv[6:]

        utils.rsyncOtherToLocalhost(ipGetAny(host), source, target, opts)

    if utils.inputMatch(['rsync', 'all'], doPartial=True):
        source = sys.argv[3]
        target = sys.argv[4]

        rsyncOnAllNodesLocalhostToLocalAsync(source, target)

    if utils.inputMatch(['full']):
        mainLaunchRegular()
        sshKeysDeploy()
        setScreenRc()

    if utils.inputMatch(['runall'], doPartial=True):
        command = sys.argv[2]
        runOnAllNodesAsync(command)

    if utils.inputMatch(['config']):
        print(configuration.getConfig())
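
The rsync subcommands delegate to two utils wrappers, rsyncLocalhostToOther and rsyncOtherToLocalhost, that the excerpt does not show. A minimal sketch under the assumption that they are plain rsync-over-ssh calls (the -avz defaults and the option handling are guesses):

import subprocess

def rsyncLocalhostToOther(ip, source, target, opts=()):
    # Assumed: push a local path to `target` on the remote host `ip`.
    cmd = ['rsync', '-avz'] + list(opts) + [source, '{0}:{1}'.format(ip, target)]
    subprocess.check_call(cmd)

def rsyncOtherToLocalhost(ip, source, target, opts=()):
    # Assumed: pull a remote path from `ip` down to the local `target`.
    cmd = ['rsync', '-avz'] + list(opts) + ['{0}:{1}'.format(ip, source), target]
    subprocess.check_call(cmd)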
Example 7
def setupFull():
    cluster.mainLaunchRegular()
    cluster.sshKeysDeploy()
    cluster.setScreenRc()
    hadoop.setupFullWithFormat()
    spark.setupFull()
    package.setupFull()


if __name__ == "__main__":

    if utils.inputMatch([]):

        utils.print_header("GCE HELP")

        utils.print_option('full', 'cluster -> keys -> hadoop (with format) -> spark -> pypilok', hassub=False)
        utils.print_option('kill', 'kill everything', hassub=False)
        utils.print_option('cluster', 'cluster launch, kill, ssh, etc.', hassub=True)
        utils.print_option('commondata', 'tools for handling the common data disk', hassub=True)
        utils.print_option('hadoop', 'generates hadoop files and upload', hassub=True)
        utils.print_option('spark', 'generates spark files and upload', hassub=True)
        utils.print_option('package', 'interact with package distribution', hassub=True)

        # utils.print_option('test')
        # utils.print_option('interact')
        # utils.print_option('rsync')
        utils.print_header("")

    if utils.inputMatch(['full']):
        setupFull()
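
Only the full branch of the top-level dispatcher is shown. The submenu entries printed above ('cluster', 'hadoop', 'spark', 'package', ...) presumably hand control to each module's own dispatcher; the prefix='' parameter on cluster's main hints that matching accounts for the already-consumed leading token. Purely as a speculative sketch:

    if utils.inputMatch(['cluster'], doPartial=True):
        # Speculative: delegate to the cluster submenu; how the leading
        # 'cluster' token is skipped is not visible in the excerpt.
        cluster.main(prefix='cluster')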