Example #1
def test(cmd):
    util.info("")
    util.info("- Starting " + cmd)
    util.info("")
    util.run(cmd)
    
    commands.getoutput("rm -rf " + file1)
    util.info("")
    util.info("- Sending ./testclient localhost 2010 /testdata/file1.txt")

    res = commands.getoutput("./testclient localhost 2010 /testdata/file1.txt")
    arrival = util.get_stat2(res, "Stat-req-arrival")
    dispatch = util.get_stat2(res, "Stat-req-dispatch")
    read = util.get_stat2(res, "Stat-req-read")
    complete = util.get_stat2(res, "Stat-req-complete")
    print ""
    print "dispatch = " + str(dispatch)
    print "read = " + str(read)
    print "complete = " + str(complete)


    if dispatch >= 0 and read >= 0 and complete >= 0 and dispatch + read <= complete:
        util.good("You passed this test")
    else:
        util.error("Expected dispatch >= 0 and read >= 0 and complete >= 0 and"
                   " dispatch + read <= complete")
Example #2
def build_libuv_windows(arch):
  args = ["cmd", "/c", "vcbuild.bat", "release", "vs2017"]
  if arch == "-32":
    args.append("x86")
  elif arch == "-64":
    args.append("x64")
  run(args, cwd=LIB_UV_DIR)
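The run() helper here takes an argument list and a working directory. A plausible stand-in, assuming it simply wraps subprocess and raises on a non-zero exit code:

import subprocess

def run(args, cwd=None):
    # Execute the command in the given directory; raise CalledProcessError on failure.
    subprocess.check_call(args, cwd=cwd)

With that, build_libuv_windows("-64") ends up executing "cmd /c vcbuild.bat release vs2017 x64" inside LIB_UV_DIR.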
Example #3
def test(cmd):
    util.info("")
    util.info("- Starting " + cmd)
    util.info("")
    util.run(cmd)
    
    start = time.time()
    
    clientlist = []

    for i in range(0, NUM_CLIENT):
        client = testit("Client-" + str(i))
        clientlist.append(client)
        client.start()
    
    for client in clientlist:
        client.join()


    end = time.time()
    util.info("Elapsed time (in seconds): " + str(end-start))

    if util.is_server_alive(cmd) == -1:
        util.error("Ouch! Server is dead!"
                   " Your bounded buffered may not be well protected");
Example #4
def run_on_kubernetes(args):
    create_gcloud_secret()
    context   = util.get_cluster_prefix()
    namespace = util.get_current_namespace()
    if len(args.number) == 0:
        # Figure out the nodes based on the names of persistent disks, or just node 0 if none.
        args.number = range(max(1,len(get_persistent_disks(context, namespace))))
    if 'storage-projects' not in util.get_services():
        util.run(['kubectl', 'create', '-f', 'conf/service.yaml'])
    args.local = False # so tag is for gcloud

    tag = util.get_tag(args, NAME, build)
    if not args.tag:
        tag = tag[:tag.rfind('-')]   # get rid of the final -[service] part of the tag.

    t = open(join('conf', '{name}.template.yaml'.format(name=NAME))).read()

    ensure_ssh()
    for number in args.number:
        deployment_name = "{name}{number}".format(name=NAME, number=number)
        ensure_persistent_disk_exists(context, namespace, number, args.size, args.type)
        with tempfile.NamedTemporaryFile(suffix='.yaml', mode='w') as tmp:
            tmp.write(t.format(image         = tag,
                               number        = number,
                               gcloud_bucket = gcloud_bucket(namespace=namespace),
                               pd_name       = pd_name(context=context, namespace=namespace, number=number),
                               health_delay  = args.health_delay,
                               pull_policy   = util.pull_policy(args)))
            tmp.flush()
            util.update_deployment(tmp.name)
Example #5
def main():
  parser = argparse.ArgumentParser()
  parser.add_argument('--computer',
      default='/Users/lnyang/lab/qd2/qd2/compute_basic_feature.py')
  parser.add_argument('--processed_dir', required=True,
                      help='dir of processed sf1 data')
  parser.add_argument('--ticker_file', required=True)
  parser.add_argument('--feature_base_dir', required=True)
  parser.add_argument('--info_dir', required=True)
  parser.add_argument('--use_mrx', action='store_true')
  args = parser.parse_args()

  for indicator, dimensions in ITEMS:
    for dimension in dimensions:
      if args.use_mrx:
        dimension = util.getMrx(dimension)
      folder = '%s-%s' % (indicator, dimension)
      feature_dir = '%s/%s' % (args.feature_base_dir, folder)
      info_file = '%s/%s' % (args.info_dir, folder)
      if not shouldRun(feature_dir, info_file):
        continue
      cmd = ('%s --processed_dir=%s --ticker_file=%s --dimension=%s '
             '--header=%s --feature_dir=%s --info_file=%s' % (
             args.computer, args.processed_dir, args.ticker_file, dimension,
             indicator, feature_dir, info_file))
      util.run(cmd)
Example #6
def build(tag, rebuild):
    for service in SERVICES:
        v = ['sudo', 'docker', 'build', '-t', full_tag(tag, service)]
        if rebuild:
            v.append("--no-cache")
        v.append('.')
        util.run(v, path=join(SCRIPT_PATH, 'images', service))
Example #7
def build(tag, rebuild):
    # Next build smc-hub, which depends on smc-hub-base.
    v = ['sudo', 'docker', 'build', '-t', tag]
    if rebuild:  # will cause a git pull to happen
        v.append("--no-cache")
    v.append('.')
    util.run(v, path=join(SCRIPT_PATH, 'image'))
Example #8
File: cli.py Project: IxLabs/vlab
    def __init__(self, vlab, stdin=sys.stdin):
        """Instantiates a CLI object
        :param vlab: Vlab class to be run"""
        self.stdin = stdin
        self.vlab = vlab
        Cmd.__init__(self)
        print('Starting CLI:\n')

        # Setup history if readline is available
        try:
            import readline
        except ImportError:
            pass
        else:
            history_path = os.path.expanduser('~/.vlab_history')
            if os.path.isfile(history_path):
                readline.read_history_file(history_path)
            atexit.register(
                lambda: readline.write_history_file(history_path))

        while True:
            try:
                if self.isatty():
                    run('stty sane')
                self.cmdloop()
                break
            except KeyboardInterrupt:
                print('\nInterrupt\n')
Example #9
def init_tri_from_mono(model, root_dir, mono_dir, tri_mlf, mono_list, tri_list):
    """
    Convert a monophone model and triphone mlf to triphones
    """

    ## Create the xword directory and the current output directory
    output_dir = '%s/HMM-0-0' %root_dir
    util.create_new_dir(root_dir)
    util.create_new_dir(output_dir)

    mktri_hed = '%s/mktri.hed' %output_dir
    hhed_log = '%s/hhed_clone_mono.log' %output_dir

    ## Create an HHEd script to clone monophones to triphones
    fh = open(mktri_hed, 'w')
    for line in open(mono_list):
        mono = line.strip()
        fh.write('TI T_%s {(%s).transP}\n' %(mono, mono))
    fh.write('CL %s\n' %tri_list)
    fh.close()

    ## Run HHEd to clone monophones and tie transition matrices
    cmd  = 'HHEd -A -T 1 -H %s/MMF' %mono_dir
    cmd += ' -M %s' %output_dir
    cmd += ' %s %s > %s' %(mktri_hed, mono_list, hhed_log)

    if model.local: os.system(cmd)
    else: util.run(cmd, output_dir)

    return output_dir
Example #10
def predict(experiment_dir, config_map, predict_meta_file):
  result_dir = getResultDir(experiment_dir)
  util.maybeMakeDir(result_dir)
  result_file = getResultPath(result_dir)

  data_dir = getDataDir(experiment_dir)
  data_file = getDataPath(data_dir)
  if config_map['use_classification']:
    label_file = getLabelPath(data_dir)
  else:
    label_file = getRlabelPath(data_dir)
  meta_file = getMetaPath(data_dir)
  model_dir = getModelDir(experiment_dir)
  imputer_dir = getImputerDir(experiment_dir)

  model_prefix = '%s-' % getModelName(config_map)
  model_suffix = '-%d' % config_map['train_window']
  imputer_prefix = 'imputer-'
  imputer_suffix = '-%d' % config_map['train_window']

  cmd = ('%s/predict_all.py --data_file=%s --label_file=%s '
         '--meta_file=%s --model_dir=%s --model_prefix="%s" '
         '--model_suffix="%s" --imputer_dir=%s --imputer_prefix="%s" '
         '--imputer_suffix="%s" --prediction_window=%d '
         '--delay_window=%d --predict_meta_file=%s --result_file=%s' % (
            CODE_DIR, data_file, label_file, meta_file,
            model_dir, model_prefix, model_suffix,
            imputer_dir, imputer_prefix, imputer_suffix,
            config_map['predict_window'],
            config_map['delay_window'], predict_meta_file,
            result_file))
  util.run(cmd)
Example #11
def test(cmd):
    print ""
    print "Starting " + cmd
    util.run(cmd)

    clientlist = []
    
    start = time.time()

    for i in range(0, NUM_CLIENT):
        client = testit("Client-" + str(i))
        client.setDaemon(True)
        clientlist.append(client)
        client.start()
    
    for client in clientlist:
        client.join()

    end = time.time()

    if util.is_server_alive(cmd) == -1:
        util.error("Ouch! Server is dead!"
                   " Your bounded buffered may not be well protected");

    print "Elapsed time (in seconds): " + str(end-start)
    if end - start > EXPECTED_TIME:
        util.error("your server is not multithreaded")
Example #12
def test(cmd):
    global expected
    global got
    global count

    util.info("")
    util.info("- Starting " + cmd)
    util.info("")
    util.run(cmd)
    
    start = time.time()

    clientlist = []
    expected = []
    for i in range(1, NUM_CLIENT):
        expected.append(commands.getoutput("cat ./testdata/file%s.txt" % str(i)))

    commands.getoutput("rm -rf %s" % tmpfile)

    for i in range(0, NUM_CLIENT):
        client = testit("Client-" + str(i), i)
        clientlist.append(client)
        client.start()
        time.sleep(0.3)
    
    for client in clientlist:
        client.join()

    end = time.time()
    util.info("Elapsed time (in seconds): " + str(end-start))

    time.sleep(CGI_SPIN_TIME + 2)
    res = commands.getoutput("cat %s" % tmpfile)

    if util.is_server_alive(cmd) == -1:
        util.error("Ouch! Server is dead!"
                   " Your bounded buffered may not be well protected");

    pos0 = res.find(expected[0])
    pos1 = res.find(expected[1])
    pos2 = res.find(expected[2])
    passed = pos0 > 0 and pos1 > 0 and pos2 > 0 and pos0 < pos1 and pos1 < pos2
    
    util.info(res)

    if passed:
        print ""
        print "#####################################"
        print "GOOD! you implement SFF correctly"
        print "#####################################"
        print ""
        count = count + 1
    else:
        print ""
        print "#####################################"
        print "Oh oh! ERROR ERROR!"
        print "SFF is not implemented correctly"
        print "#####################################"
        print ""
        sys.exit(-1)
Example #13
File: setup.py Project: F001/deno
def gn_gen(mode):
    os.environ["DENO_BUILD_MODE"] = mode

    # Rather than using gn gen --args we write directly to the args.gn file.
    # This is to avoid quoting/escaping complications when passing overrides as
    # command-line arguments.
    args_filename = os.path.join(build_path(), "args.gn")

    # Check if args.gn exists, and if it was auto-generated or handcrafted.
    existing_gn_args, hand_edited = read_gn_args(args_filename)

    # If args.gn was hand-edited, keep it; otherwise regenerate it.
    if hand_edited:
        print "%s: Using gn options from hand edited '%s'." % (mode,
                                                               args_filename)
        gn_args = existing_gn_args
    else:
        print "%s: Writing gn options to '%s'." % (mode, args_filename)
        gn_args = generate_gn_args(mode)
        if gn_args != existing_gn_args:
            write_gn_args(args_filename, gn_args)

    for line in gn_args:
        print "  " + line

    run([third_party.gn_path, "gen", build_path()],
        env=third_party.google_env())
Example #14
def install_kubernetes(args):
    version = args.version
    if not version:
        version = util.run("curl -s https://github.com/kubernetes/kubernetes/releases | grep kubernetes/tree",
                 get_output=True).splitlines()[0].split("tree/v")[1].split('"')[0]
        print("using latest version '%s'"%version)
    install_path = os.path.join(os.environ['HOME'], 'install')
    link_path = os.path.join(os.environ['HOME'], 'kubernetes')
    if not os.path.exists(install_path):
        os.makedirs(install_path)
    if os.path.exists(link_path) and not os.path.islink(link_path):
        raise RuntimeError("Please manually remove '%s'"%link_path)
    target_path = os.path.join(install_path, 'kubernetes-v%s'%version)
    if not os.path.exists(target_path):
        target = os.path.join(install_path, 'kubernetes.tar.gz')
        if os.path.exists(target):
            os.unlink(target)
        util.run(['wget', 'https://github.com/kubernetes/kubernetes/releases/download/v%s/kubernetes.tar.gz'%version],
                path = install_path)
        util.run(['tar', 'zvxf', target], path=install_path)
        os.unlink(target)
        shutil.move(os.path.join(install_path, 'kubernetes'), target_path)
    if os.path.exists(link_path):
        os.unlink(link_path)
    os.symlink(target_path, link_path)
Example #15
def mixup(model, root_dir, prev_dir, model_list, mix_size, estimateVarFloor=0):
    """
    Run HHEd to initialize a mixup to mix_size gaussians
    """

    output_dir = '%s/HMM-%d-%d' %(root_dir, mix_size, 0)
    util.create_new_dir(output_dir)

    ## Make the hed script
    mix_hed = '%s/mix_%d.hed' %(output_dir, mix_size)
    fh = open(mix_hed, 'w')

    if estimateVarFloor:
        fh.write('LS %s/stats\n' % prev_dir)
        fh.write('FA 0.1\n')
            
    fh.write('MU %d {(sil,sp).state[2-%d].mix}\n' %(2*mix_size,model.states-1))
    fh.write('MU %d {*.state[2-%d].mix}\n' %(mix_size, model.states-1))
    fh.close()

    hhed_log = '%s/hhed_mix.log' %output_dir

    cmd  = 'HHEd -A -D -T 1 -H %s/MMF -M %s' %(prev_dir, output_dir)
    cmd += ' %s %s > %s' %(mix_hed, model_list, hhed_log)
    if model.local == 1: os.system(cmd)
    else: util.run(cmd, output_dir)

    return output_dir
Example #16
def mixdown_mono(model, root_dir, prev_dir, phone_list):
    """
    Run HHEd to mixdown monophones
    """

    output_dir = '%s/HMM-1-0' %root_dir
    util.create_new_dir(output_dir)

    ## Create the full list of possible triphones
    phones = open(phone_list).read().splitlines()
    non_sil_phones = [p for p in phones if p not in ['sp', 'sil']]

    ## Make the hed script
    mixdown_hed = '%s/mix_down.hed' %output_dir
    fh = open(mixdown_hed, 'w')
    fh.write('MD 12 {(sil,sp).state[2-%d].mix}\n' %(model.states-1))
    for phone in non_sil_phones:
        fh.write('MD 1 {%s.state[2-%d].mix}\n' %(phone, model.states-1))
    fh.close()

    hhed_log = '%s/hhed_mixdown.log' %output_dir

    cmd  = 'HHEd -A -D -T 1 -H %s/MMF -M %s' %(prev_dir, output_dir)
    cmd += ' %s %s > %s' %(mixdown_hed, phone_list, hhed_log)
    if model.local == 1: os.system(cmd)
    else: util.run(cmd, output_dir)

    return output_dir
Example #17
def make_sparse_file(size="1M"):
    path = "/tmp/block"
    try:
        os.unlink(path)
    except:
        pass
    run([ "/bin/dd", "if=/dev/zero", "of=%s" % path, "seek=1M", "count=0", "bs=1"])
    return path
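With count=0 and bs=1, dd writes nothing and merely truncates the output file at the seek offset, so the result is a sparse file: its apparent size equals the requested size, but no data blocks are allocated. A quick check on a filesystem with sparse-file support:

import os

path = make_sparse_file()
print(os.path.getsize(path))    # 1048576 -- apparent size in bytes
print(os.stat(path).st_blocks)  # 0 -- no blocks actually allocated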
Example #18
def add_file_to_itunes(filename, cue=None):
    filename = os.path.abspath(filename)
    print "Adding file %s to iTunes" % filename
    run('osascript', ADD_SONG, filename)
    if cue is not None:
        name = os.path.splitext(os.path.basename(filename))[0]
        run('osascript', SET_TAG, name, cue.artist, cue.album, getattr(cue, 'genre'), cue.getname(name),
            cue.track_count, cue.track_index(name), cue.year)
Example #19
def createSetupLock():
    global locked
    try:
        run("mkdir %s" % env.SETUP_LOCK)
        locked = True
    except Exception:
        logger.error("!!! First-time setup is already in progress. Exiting...")
        sys.exit(1)
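This works as a cross-process lock because mkdir is atomic: a second caller's mkdir fails while the directory exists. A hypothetical counterpart for releasing the lock on normal teardown:

def removeSetupLock():
    # Hypothetical: drop the lock directory created by createSetupLock().
    global locked
    if locked:
        run("rmdir %s" % env.SETUP_LOCK)
        locked = False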
Example #20
def forward_test(args):
    v = util.get_pods(run='rethinkdb-proxy')
    v = [x for x in v if x['STATUS'] == 'Running']
    if len(v) == 0:
        print("No rethinkdb-proxy nodes available")
    else:
        print("\n\nYou may connect to rethinkdb-proxy on localhost:\n\n")
        util.run(['kubectl', 'port-forward', v[0]['NAME'], '28015:28015'])
Example #21
def maybeRun(args, input_dir, feature, windows):
  if not shouldRun(args.feature_base_dir, args.info_dir, feature, windows):
    return
  cmd = ('%s --input_dir=%s --output_dir=%s --feature=%s --windows=%s '
         '--ticker_file=%s --info_base_dir=%s' % (
         args.computer, input_dir, args.feature_base_dir, feature, windows,
         args.ticker_file, args.info_dir))
  util.run(cmd)
Example #22
def resize_cluster(args):
    prefix = util.get_cluster_prefix()
    if args.name:
        group = '{prefix}-{name}-minion-group'.format(prefix=prefix, name=args.name)
    else:
        group = '{prefix}-minion-group'.format(prefix=prefix)
    util.run(['gcloud', 'compute', 'instance-groups', 'managed', 'resize', group,
         '--size', str(args.size)])
Example #23
def run_all():
    x = util.get_deployments()
    for name in ['rethinkdb-proxy', 'smc-webapp-static', 'smc-hub', 'haproxy']:
        if name not in x:
            if name == 'rethinkdb0':
                name = 'rethinkdb'
            print('\n******\nRUNNING {name}\n******\n'.format(name=name))
            util.run([join(SCRIPT_PATH,'..',name,'control.py'), 'run'])
Example #24
def test(cmd):
    global count
    util.info("")
    util.info("- Starting " + cmd)
    util.info("")
    util.run(cmd)
    
    commands.getoutput("rm -rf " + file1)
    commands.getoutput("rm -rf " + file2)
    os.system("./testclient localhost 2010 /output.cgi >> /dev/null &");
    time.sleep(0.3)
#    os.system("./testclient localhost 2010 /testdata/file-sff-large.txt > /tmp/file1 &");
    os.system("./testclient localhost 2010 /testdata/file-sff-large.txt > %s &" % file1);
#   time.sleep(0.1)
    os.system("./testclient localhost 2010 /output.cgi >> /dev/null &");
#   time.sleep(0.1)
#    os.system("./testclient localhost 2010 /testdata/file-sff-small.txt > /tmp/file2 &");
    os.system("./testclient localhost 2010 /testdata/file-sff-small.txt > %s &" % file2);


    time.sleep(3 *  CGI_SPIN_TIME)
    time.sleep(5) # this doesn't hurt
    if util.is_server_alive(cmd) == -1:
        util.error("Ouch! Server is dead!"
                   " Your bounded buffered may not be well protected");

    # time1 = os.path.getmtime(file1)
    # time2 = os.path.getmtime(file2)

    time_decimal1 = str(os.path.getmtime(file1))
    time_decimal2 = str(os.path.getmtime(file2))

   
    # print time_decimal1
    # print time_decimal2

    passed = Decimal(time_decimal1).compare_total(Decimal(time_decimal2)) != -1

    # passed = time1 > time2

    if passed:
        print ""
        print "#####################################"
        print "GOOD! you implement SFF correctly"
        print "#####################################"
        print ""
        count = count + 1
    else:
        print ""
        print "#####################################"
        print "Oh oh! ERROR ERROR!"
        print "SFF is not implemented correctly"
        print "#####################################"
        print ""
        sys.exit(-1)

    util.kill()
Example #25
def maybe_download_sysroot():
    if sys.platform.startswith('linux'):
        run([
            'python',
            os.path.join(root_path,
                         'build/linux/sysroot_scripts/install-sysroot.py'),
            '--arch=amd64'
        ],
            env=google_env())
Example #26
    def checkout(self, url, depth="infinity"):
        """Check out a working copy from the given URL.

    Args:
      url: The Subversion repository URL to check out.
      depth: The depth of the working copy root.
    """
        assert not self.exists()
        util.run(["svn", "checkout", "--depth=" + depth, url, self.path()])
Example #27
    def build(self, docker):

        if not os.path.isdir('docker/%s' % docker):
            return

        # Copy our public key into the docker directory so it propagates inside our containers
        shutil.copy(os.path.expanduser("~") + '/.ssh/id_rsa.pub', 'docker/%s/root' % docker)

        run('vagrant ssh --command "cd %s/%s && docker build -t=\"%s\" ."' % (self.baseDir, docker, docker), cwd="docker/%s" % docker)
Example #28
    def commit(self, message, path=""):
        """Commit scheduled changes to the source repository.

    Args:
      message: The commit message to use.
      path: The path to commit.
    """
        assert self.exists()
        util.run(["svn", "commit", "-m", message, self.path(path)])
Example #29
def init_repo(cache_dir, remote, fetch=True):
    repo = get_repo_path(cache_dir, hash_str(remote))
    if not is_git_dir(repo):
        run(["git", "clone", "--bare", "-q", "--", remote, repo],
            stdin=DEVNULL, check=True)
    elif fetch:
        run(["git", "-C", repo, "fetch", "--all", "-p", "-q"],
            stdin=DEVNULL, check=True)
    return repo
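Usage sketch: the cache keys each remote by a hash of its URL, so the first call clones a bare mirror and later calls only fetch (the paths and URL below are illustrative):

cache = "/var/cache/git"  # hypothetical cache directory
repo = init_repo(cache, "https://github.com/libuv/libuv.git")               # clones
repo = init_repo(cache, "https://github.com/libuv/libuv.git", fetch=False)  # offline reuse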
Example #30
def create_install_path():
    """
    Create install-tmp/, which contains files that will get
    installed into the k8s host node when the daemon starts.
    """
    remove_install_path()
    os.makedirs("install-tmp")
    shutil.copyfile("../driver/smc-storage.py", 'install-tmp/smc-storage')
    util.run(['git', 'clone', 'https://github.com/sagemathinc/gke-zfs'], path=join(SCRIPT_PATH, 'install-tmp'))
Example #31
    def make_snapshot(self, name):
        # Delete all the snapshots for this VM
        for snap in self.get_domain(name).listAllSnapshots():
            show('Deleting snapshot %s' % snap.getName())
            snap.delete()

        show('Creating new snapshot..')
        stdout, stderr, rc = util.run(
            ['virsh', 'snapshot-create', '--domain', name])

        if rc != 0:
            raise RuntimeError("Could not create snapshot for %s" % name)

        show('Created!')
Example #32
    def create_vm(self, name, template=locals.TEMPLATE_NAME):

        # TODO: check if the VM with the name of name exists

        # Check whether template VM exists
        show('Checking for existence of template')
        template_domain = self.get_domain(template)

        # TODO: check if it is running, if is, print down warning and shut
        # it down

        if template_domain:
            show('Cloning..')

            # Find out next available MAC address in the pool
            new_mac = self.get_next_free_mac()

            output, errors, rc = util.run([
                'virt-clone',
                '-o',
                template,
                '--auto-clone',
                '-n',
                name,
                '-m',
                new_mac,
            ])

            if rc != 0:
                raise RuntimeError("Could not clone VM %s" % template)

            show('Cloning successful')

            # TODO: check that it started, if not, wait
            show('Starting..')
            self.start(name)
            sleep(10)

            # Macs are tied to the IPs
            last_mac_segment = new_mac.split(':')[-1]
            ip = locals.IP_BASE + '%s' % int(last_mac_segment)

            show('IP determined: %s' % ip)
            hostname = util.normalize_hostname(ip)

            return VM(name=name,
                      backend=self,
                      hostname=hostname,
                      domain=locals.DOMAIN,
                      ip=ip)
Example #33
def download(ticker_file, download_dir, overwrite):
  tickers = util.readTickers(ticker_file)
  for ticker in tickers:
    download_file = '%s/%s.csv' % (download_dir, ticker)
    if os.path.isfile(download_file) and not overwrite:
      continue
    cmd = '%s --quiet "%s%s" -O %s' % (
        WGET, BASE_URL, ticker, download_file)
    for i in range(RETRIES):
      result = util.run(cmd, check=False)
      if result == 0:
        break
      if os.path.isfile(download_file):
        os.remove(download_file)
Example #34
def run_exec_time(deno_exe, build_dir):
    hyperfine_exe = third_party.get_prebuilt_tool_path("hyperfine")
    benchmark_file = os.path.join(build_dir, "hyperfine_results.json")
    run([
        hyperfine_exe, "--ignore-failure", "--export-json", benchmark_file,
        "--warmup", "3"
    ] + [
        deno_exe + " run " + " ".join(args)
        for [_, args] in exec_time_benchmarks
    ])
    hyperfine_results = read_json(benchmark_file)
    results = {}
    for [[name, _], data] in zip(exec_time_benchmarks,
                                 hyperfine_results["results"]):
        results[name] = {
            "mean": data["mean"],
            "stddev": data["stddev"],
            "user": data["user"],
            "system": data["system"],
            "min": data["min"],
            "max": data["max"]
        }
    return results
Example #35
def update_slurm_node_addrs(compute):
    for node_name, operation in operations.items():
        try:
            # Wait for the instance-creation operation to finish, then
            # update the node's address in Slurm.
            wait_for_operation(compute, cfg.project, operation)

            pid = util.get_pid(node_name)
            my_fields = 'networkInterfaces(name,network,networkIP,subnetwork)'
            instance_networks = compute.instances().get(
                project=cfg.project,
                zone=cfg.partitions[pid].zone,
                instance=node_name,
                fields=my_fields).execute()
            instance_ip = instance_networks['networkInterfaces'][0][
                'networkIP']

            util.run(
                f"{SCONTROL} update node={node_name} nodeaddr={instance_ip}")

            log.info("Instance " + node_name + " is now up")
        except Exception:
            log.exception(f"Error in adding {node_name} to slurm")
Example #36
def init_rawfile(volume_id, size):
    import time
    import rawfile_util
    from pathlib import Path

    from util import run

    img_dir = rawfile_util.img_dir(volume_id)
    img_dir.mkdir(exist_ok=True)
    img_file = Path(f"{img_dir}/disk.img")
    if img_file.exists():
        return
    rawfile_util.patch_metadata(
        volume_id,
        {
            "volume_id": volume_id,
            "created_at": time.time(),
            "img_file": img_file.as_posix(),
            "size": size,
        },
    )
    run(f"truncate -s {size} {img_file}")
    run(f"mkfs.ext4 {img_file}")
Example #37
def fetch_favicon(link_dir, link, timeout=TIMEOUT):
    """download site favicon from google's favicon api"""

    if os.path.exists(os.path.join(link_dir, 'favicon.ico')):
        return {'output': 'favicon.ico', 'status': 'skipped'}

    CMD = [
        CURL_BINARY,
        '--max-time',
        str(timeout),
        *(() if CHECK_SSL_VALIDITY else ('--insecure', )),
        'https://www.google.com/s2/favicons?domain={}'.format(
            domain(link['url'])),
    ]
    fout = open('{}/favicon.ico'.format(link_dir), 'w')
    end = progress(timeout, prefix='      ')
    try:
        run(CMD, stdout=fout, stderr=DEVNULL, cwd=link_dir,
            timeout=timeout)  # favicon.ico
        fout.close()
        end()
        chmod_file('favicon.ico', cwd=link_dir)
        output = 'favicon.ico'
    except Exception as e:
        fout.close()
        end()
        print('        {}Failed: {} {}{}'.format(ANSI['red'],
                                                 e.__class__.__name__, e,
                                                 ANSI['reset']))
        print('        Run to see full output:')
        print('            {}'.format(' '.join(CMD)))
        output = e

    return {
        'cmd': CMD,
        'output': output,
    }
Example #38
def main(argv):
    if len(argv) == 2:
        build_dir = argv[1]
    elif len(argv) == 1:
        build_dir = build_path()
    else:
        print "Usage: tools/benchmark.py [build_dir]"
        sys.exit(1)

    deno_path = os.path.join(build_dir, "deno")
    benchmark_file = os.path.join(build_dir, "benchmark.json")

    os.chdir(root_path)
    import_data_from_gh_pages()
    # TODO: Use hyperfine in //third_party
    run(["hyperfine", "--export-json", benchmark_file, "--warmup", "3"] +
        [deno_path + " " + " ".join(args) for [_, args] in benchmarks])
    all_data = read_json(data_file)
    benchmark_data = read_json(benchmark_file)
    sha1 = run_output(["git", "rev-parse", "HEAD"]).strip()
    new_data = {
        "created_at": time.strftime("%Y-%m-%dT%H:%M:%SZ"),
        "sha1": sha1,
        "binary_size": os.path.getsize(deno_path),
        "benchmark": {}
    }
    for [[name, _], data] in zip(benchmarks, benchmark_data["results"]):
        new_data["benchmark"][name] = {
            "mean": data["mean"],
            "stddev": data["stddev"],
            "user": data["user"],
            "system": data["system"],
            "min": data["min"],
            "max": data["max"]
        }
    all_data.append(new_data)
    write_json(data_file, all_data)
Example #39
File: lint.py Project: x1B/deno
def eslint():
    script = os.path.join(third_party_path, "node_modules", "eslint", "bin",
                          "eslint")
    # Find all *directories* in the main repo that contain .ts/.js files.
    source_files = get_sources(root_path, [
        "*.js",
        "*.ts",
        ":!:cli/tests/swc_syntax_error.ts",
        ":!:std/**/testdata/*",
        ":!:std/**/node_modules/*",
        ":!:cli/compilers/wasm_wrap.js",
        ":!:cli/tests/error_syntax.js",
        ":!:cli/tests/lint/**",
        ":!:cli/tests/encoding/**",
        ":!:cli/dts/**",
        ":!:cli/tsc/*typescript.js",
        ":!:cli/bench/node*.js",
    ])
    if source_files:
        max_command_len = 30000
        pre_command = ["node", script, "--max-warnings=0", "--"]
        chunks = [[]]
        cmd_len = len(" ".join(pre_command))
        for f in source_files:
            if cmd_len + len(f) > max_command_len:
                chunks.append([f])
                cmd_len = len(" ".join(pre_command))
            else:
                chunks[-1].append(f)
                cmd_len = cmd_len + len(f) + 1
        for c in chunks:
            print_command("eslint", c)
            # Set NODE_PATH so we don't have to maintain a symlink in root_path.
            env = os.environ.copy()
            env["NODE_PATH"] = os.path.join(root_path, "third_party",
                                            "node_modules")
            run(pre_command + c, shell=False, env=env, quiet=True)
Example #40
    def stage_package(self):
        super().stage_package_before()
        run("mkdir -p {}".format(self.stage_include_dir_path))
        run("rm -rf {}/*".format(self.package_stage_include_dir_path))
        run("cp -rv {}/{} {}".format(self.package_clone_dir_path,
                                     header_cp_pattern,
                                     self.package_stage_include_dir_path))

        super().stage_package_after()
Example #41
def run_on_kubernetes(args):

    util.ensure_secret_exists('sendgrid-api-key', 'sendgrid')
    util.ensure_secret_exists('zendesk-api-key', 'zendesk')
    args.local = False  # so tag is for gcloud
    if args.replicas is None:
        args.replicas = util.get_desired_replicas(NAME, 2)
    tag = util.get_tag(args, NAME, build)

    opts = {
        'image_hub': tag,
        'replicas': args.replicas,
        'pull_policy': util.pull_policy(args),
        'min_read_seconds': args.gentle,
        'smc_db_hosts': args.database_nodes,
        'smc_db_pool': args.database_pool_size,
        'smc_db_concurrent_warn': args.database_concurrent_warn
    }

    if args.database_nodes == 'localhost':
        from argparse import Namespace
        ns = Namespace(tag=args.rethinkdb_proxy_tag, local=False)
        opts['image_rethinkdb_proxy'] = util.get_tag(ns, 'rethinkdb-proxy',
                                                     build)
        filename = 'smc-hub-rethinkdb-proxy.template.yaml'
    else:
        filename = '{name}.template.yaml'.format(name=NAME)
    t = open(join('conf', filename)).read()
    with tempfile.NamedTemporaryFile(suffix='.yaml', mode='w') as tmp:
        r = t.format(**opts)
        #print(r)
        tmp.write(r)
        tmp.flush()
        util.update_deployment(tmp.name)

    if NAME not in util.get_services():
        util.run(['kubectl', 'expose', 'deployment', NAME])
Example #42
def fetch_git(link_dir, link, timeout=TIMEOUT):
    """download full site using git"""

    url_is_clonable = (domain(link['url']) in GIT_DOMAINS
                       or link['url'].endswith('.git')
                       or link['type'] == 'git')

    if not url_is_clonable:
        return {'output': None, 'status': 'skipped'}

    git_dir = os.path.join(link_dir, 'git')
    if os.path.exists(git_dir):
        return {'output': 'git', 'status': 'skipped'}

    os.makedirs(git_dir, exist_ok=True)
    output = 'git'
    CMD = [
        GIT_BINARY,
        'clone',
        '--mirror',
        '--recursive',
        *(() if CHECK_SSL_VALIDITY else ('-c', 'http.sslVerify=false')),
        without_query(without_fragment(link['url'])),
    ]
    end = progress(timeout, prefix='      ')
    try:
        result = run(CMD,
                     stdout=PIPE,
                     stderr=PIPE,
                     cwd=git_dir,
                     timeout=timeout + 1)
        end()

        if result.returncode == 128:
            # ignore failed re-download when the folder already exists
            pass
        elif result.returncode > 0:
            print('        got git response code {}:'.format(
                result.returncode))
            raise Exception('Failed git download')
    except Exception as e:
        end()
        output = e
        print_error_hints(cmd=CMD, pwd=link_dir, err=e)

    return {
        'cmd': CMD,
        'output': output,
    }
Example #43
def rio_bind_workload(stack, vname, wrklname):
    fullVolName = (f"{stack}/{vname}")
    fullWklName = (f"{stack}/{wrklname}")

    util.wait_for_state(fullVolName, "active")
    util.run(f"rio exec {fullVolName} touch /persistentvolumes/helloworld")
    util.run(f"rio run -n {fullWklName} -v data-{vname}-0:/data nginx")
    util.run(f"rio wait {fullWklName}")
    output = util.run(f"rio exec {fullWklName} ls /data")

    print(f'OUTPUT = {output}')

    return output
Example #44
def dlint():
    executable_path = get_prebuilt_tool_path("dlint")

    # Find all *directories* in the main repo that contain .ts/.js files.
    source_files = get_sources(root_path, [
        "*.js",
        "*.ts",
        ":!:cli/tests/swc_syntax_error.ts",
        ":!:cli/tests/038_checkjs.js",
        ":!:cli/tests/error_008_checkjs.js",
        ":!:std/**/testdata/*",
        ":!:std/**/node_modules/*",
        ":!:cli/bench/node*.js",
        ":!:cli/compilers/wasm_wrap.js",
        ":!:cli/dts/**",
        ":!:cli/tests/encoding/**",
        ":!:cli/tests/error_syntax.js",
        ":!:cli/tests/lint/**",
        ":!:cli/tests/tsc/**",
        ":!:cli/tsc/*typescript.js",
    ])
    if source_files:
        max_command_len = 30000
        pre_command = [executable_path, "run"]
        chunks = [[]]
        cmd_len = len(" ".join(pre_command))
        for f in source_files:
            if cmd_len + len(f) > max_command_len:
                chunks.append([f])
                cmd_len = len(" ".join(pre_command))
            else:
                chunks[-1].append(f)
                cmd_len = cmd_len + len(f) + 1
        for c in chunks:
            print_command("dlint", c)
            run(pre_command + c, shell=False, quiet=True)
Example #45
def create_cluster(args):
    if args.min_nodes > args.max_nodes:
        args.max_nodes = args.min_nodes
    if args.cost:
        c = cost_of_cluster(node_size = args.node_size,
                            node_disk_type = 'pd-ssd' if args.node_ssd else 'pd-standard',
                            node_disk_size = args.node_disk_size,
                            min_nodes = args.min_nodes,
                            max_nodes = args.max_nodes,
                            master_size = args.master_size,
                            master_disk_type = 'pd-ssd',  # forced by k8s
                            master_disk_size = args.master_disk_size,
                            preemptible = not args.non_preemptible)
        print(c)
        return

    # see https://github.com/kubernetes/kubernetes/blob/master/cluster/gce/config-default.sh for env vars
    env = {
        'KUBE_ENABLE_CLUSTER_MONITORING' : 'google',
        'KUBE_GCE_ZONE'                  : args.zone,
        'NODE_SIZE'                      : args.node_size,
        'NUM_NODES'                      : str(args.min_nodes),
        'MASTER_SIZE'                    : args.master_size,
        'MASTER_DISK_SIZE'               : "%sGB"%args.master_disk_size,
        'NODE_DISK_TYPE'                 : 'pd-ssd' if args.node_ssd else 'pd-standard',
        'NODE_DISK_SIZE'                 : "%sGB"%args.node_disk_size,
        'PREEMPTIBLE_NODE'               : 'false' if args.non_preemptible else 'true',
        'KUBE_GCE_INSTANCE_PREFIX'       : 'k8s',
        'KUBE_ENABLE_NODE_AUTOSCALER'    : 'true' if args.min_nodes < args.max_nodes else 'false',
        'KUBE_AUTOSCALER_MIN_NODES'      : str(args.min_nodes),
        'KUBE_AUTOSCALER_MAX_NODES'      : str(args.max_nodes)
    }

    env.update(os.environ)
    util.run(join(CLUSTER, 'kube-up.sh'), env=env)
    update_firewall()
Example #46
def container_registry_has_image(full_image_name, docker_path):
    env = os.environ.copy()
    env["DOCKER_CLI_EXPERIMENTAL"] = "enabled"  # needed for "docker manifest"
    proc = run(docker_path,
               "manifest",
               "inspect",
               "--insecure",
               full_image_name,
               env=env,
               check=False,
               quiet=True)
    image_found = proc.returncode == 0
    log.debug(
        "Image {} in registry".format("found" if image_found else "not found"))
    return image_found
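docker manifest inspect exits non-zero when the manifest is absent, which is why check=False plus a returncode test amounts to an existence probe. A usage sketch with an illustrative image name:

image = "example.azurecr.io/onnxruntime:abc123"  # hypothetical
if container_registry_has_image(image, "docker"):
    run("docker", "pull", image)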
Example #47
def available_cluster_ip_range():
    # Determine available ip range. TODO: this is NOT rock solid -- it's just enough to
    # prevent collisions with other clusters, which is all we need.  However, be nervous.
    routes = json.loads(util.run(['gcloud', '--format=json', 'compute', 'routes', 'list'], get_output=True))
    n = 245
    ranges = [route['destRange'] for route in routes]
    ranges.sort()
    print(ranges)
    # Scan upward until 10.<n>.0.0/16 does not collide with any existing route.
    while any(route.startswith('10.%s.' % n) for route in ranges):
        n += 1
    return '10.%s.0.0/16' % n
Example #48
def postmsg(opts):
    """Helpful info to show at the end for release manager."""
    cmd = ['git', 'rev-parse', 'remotes/origin/stable']
    ret = util.run(opts, cmd, encoding='utf-8', stdout=subprocess.PIPE)
    current_release = ret.stdout.strip()

    cmd = [
        'git', 'log', '--format=%h (%aN) %s', '--no-merges',
        f'remotes/origin/stable..{opts.tag}'
    ]
    ret = util.run(opts, cmd, encoding='utf-8', stdout=subprocess.PIPE)
    shortlog = ret.stdout.strip()

    print(f"""
Here's the short log since the last release.
{shortlog}

To push release to the public:
  git push origin {opts.commit}:stable {opts.tag} -n
NB: People will start upgrading to this version immediately.

To roll back a release:
  git push origin --force {current_release}:stable -n
""")
Example #49
    def testExecuteReorder(self):

        GEN_FILE = TEST + "testlib/testlib_patched-reorder.dll"

        changed_bytes = reorder.do_reordering(self.f.blocks, gen_patched=True)
        # check the number of changed bytes
        self.assertEqual(len(changed_bytes), 56)
        # check the number of generated files
        self.assertEqual(len(glob.glob(GEN_FILE)), 1)
        # check the output of the generated files
        output = util.run("%s %s" % (self.PROG, GEN_FILE), 10)
        self.assertTrue(self.EXPECT in output,
                        "%s: %s %s" % (GEN_FILE, self.EXPECT, output))
        # remove the generated files (explicit loop: map() is lazy on Python 3)
        for f in glob.glob(GEN_FILE):
            os.remove(f)
Example #50
def main(args):
    # Delete it if already there so we ensure we get the correct version if the
    # version number in this script changes.
    clean_dir("deps")

    print("Cloning libuv...")
    run([
        "git", "clone", "--quiet", "--depth=1",
        "https://github.com/libuv/libuv.git", LIB_UV_DIR
    ])

    print("Getting tags...")
    run(["git", "fetch", "--quiet", "--depth=1", "--tags"], cwd=LIB_UV_DIR)

    print("Checking out libuv " + LIB_UV_VERSION + "...")
    run(["git", "checkout", "--quiet", LIB_UV_VERSION], cwd=LIB_UV_DIR)

    # TODO: Pin gyp to a known-good commit. Update a previously downloaded gyp
    # if it doesn't match that commit.
    print("Downloading gyp...")
    run([
        "git", "clone", "--quiet", "--depth=1",
        "https://chromium.googlesource.com/external/gyp.git",
        LIB_UV_DIR + "/build/gyp"
    ])

    # We don't need all of libuv and gyp's various support files.
    print("Deleting unneeded files...")
    remove_dir("deps/libuv/build/gyp/buildbot")
    remove_dir("deps/libuv/build/gyp/infra")
    remove_dir("deps/libuv/build/gyp/samples")
    remove_dir("deps/libuv/build/gyp/test")
    remove_dir("deps/libuv/build/gyp/tools")
    remove_dir("deps/libuv/docs")
    remove_dir("deps/libuv/img")
    remove_dir("deps/libuv/samples")
    remove_dir("deps/libuv/test")

    # We are going to commit libuv and GYP in the main Wren repo, so we don't
    # want them to be their own repos.
    remove_dir("deps/libuv/.git")
    remove_dir("deps/libuv/build/gyp/.git")

    # Libuv's .gitignore ignores GYP, but we want to commit it.
    replace_in_file("deps/libuv/.gitignore", "/build/gyp",
                    "# /build/gyp (We do want to commit GYP in Wren's repo)")
Example #51
    def testExecuteEquiv(self):

        GEN_FILE = TEST + "testlib/testlib_patched-equiv.dll"

        changed_bytes = equiv.do_equiv_instrs(self.f.instrs, gen_patched=True)
        # check the number of changed bytes
        self.assertEqual(len(changed_bytes), 13)
        # check the number of generated files
        self.assertEqual(len(glob.glob(GEN_FILE)), 1)
        output = util.run("%s %s" % (self.PROG, GEN_FILE), 10)
        # check the output of the generated files
        self.assertTrue(self.EXPECT in output,
                        "%s: %s %s" % (GEN_FILE, self.EXPECT, output))
        # remove the generated files (explicit loop: map() is lazy on Python 3)
        for f in glob.glob(GEN_FILE):
            os.remove(f)
Example #52
def create_kubectl_secret():
    """
    Ensure that the kubectl secret needed for using kubectl instead of the pod to
    use this cluster/namespace exists.
    """
    if SECRET_NAME not in util.get_secrets():
        with tempfile.TemporaryDirectory() as tmp:
            target = join(tmp, 'config')
            config = json.loads(util.run(['kubectl', 'config', 'view', '--raw', '-o=json'], get_output=True, verbose=False))
            prefix = util.get_cluster_prefix()
            # Include only secret info that is relevant to this cluster (a mild security measure -- we can't restrict namespace btw).
            for k in ['contexts', 'clusters', 'users']:
                config[k] = [x for x in config[k] if x['name'].endswith(prefix)]
            open(target, 'w').write(yaml.dump(config))
            util.create_secret(SECRET_NAME, tmp)
Example #53
  def update_stats(self):  # todo: split into separate stats/svd updates
    """Updates all covariance/SVD info of correctable factors."""
    s = self
    ops = []

    # update covariances
    #    s.grad.update()   # TODO: not needed
    #    s.grad2.update()
    
    for var in s:
      ops.append(s[var].A.cov_update_op)
      ops.append(s[var].B2.cov_update_op)

    with u.timeit("covariances"):
      u.run(ops)

    # update SVDs
    corrected_vars = list(s)
    with u.timeit("svd"):
      with s.write_lock():
        for var in s:
          if not dont_update_first_layer or s[var].A.svd.update_counter==0:
            s[var].A.svd.update()
          s[var].B2.svd.update()
Example #54
def test(cmd):
    util.info("")
    util.info("- Starting " + cmd)
    util.info("")
    util.run(cmd)

    start = time.time()

    clientlist = []

    for i in range(0, NUM_CLIENT):
        client = testit("Client-" + str(i))
        clientlist.append(client)
        client.start()

    for client in clientlist:
        client.join()

    end = time.time()
    util.info("Elapsed time (in seconds): " + str(end - start))

    if util.is_server_alive(cmd) == -1:
        util.error("Ouch! Server is dead!"
                   " Your bounded buffered may not be well protected")
Example #55
def run_on_kubernetes(args):
    create_gcloud_secret()
    context = util.get_cluster_prefix()
    namespace = util.get_current_namespace()
    if len(args.number) == 0:
        # Figure out the nodes based on the names of persistent disks, or just node 0 if none.
        args.number = range(
            max(1, len(get_persistent_disks(context, namespace))))
    if 'storage-projects' not in util.get_services():
        util.run(['kubectl', 'create', '-f', 'conf/service.yaml'])
    args.local = False  # so tag is for gcloud

    tag = util.get_tag(args, NAME, build)
    if not args.tag:
        tag = tag[:tag.rfind(
            '-')]  # get rid of the final -[service] part of the tag.

    t = open(join('conf', '{name}.template.yaml'.format(name=NAME))).read()

    ensure_ssh()
    for number in args.number:
        deployment_name = "{name}{number}".format(name=NAME, number=number)
        ensure_persistent_disk_exists(context, namespace, number, args.size,
                                      args.type)
        with tempfile.NamedTemporaryFile(suffix='.yaml', mode='w') as tmp:
            tmp.write(
                t.format(image=tag,
                         number=number,
                         gcloud_bucket=gcloud_bucket(namespace=namespace),
                         pd_name=pd_name(context=context,
                                         namespace=namespace,
                                         number=number),
                         health_delay=args.health_delay,
                         pull_policy=util.pull_policy(args)))
            tmp.flush()
            util.update_deployment(tmp.name)
Example #56
def collectData(experiment_dir, config_map):
    data_dir = getDataDir(experiment_dir)
    util.maybeMakeDir(data_dir)

    gain_dir = getLabelDir(config_map['label'])
    feature_list = getFeatureListPath(experiment_dir)
    data_file = getDataPath(data_dir)
    label_file = getLabelPath(data_dir)
    rlabel_file = getRlabelPath(data_dir)
    meta_file = getMetaPath(data_dir)
    weight_file = getWeightPath(data_dir)

    cmd = (
        '%s/collect_data.py --gain_dir=%s --max_neg=%f --min_pos=%f '
        '--feature_base_dir=%s --feature_list=%s --feature_stats=%s '
        '--min_date=%s --max_date=%s --window=%d --min_feature_perc=%f '
        '--data_file=%s --label_file=%s --rlabel_file=%s --meta_file=%s '
        '--weight_power=%f --weight_file=%s' %
        (CODE_DIR, gain_dir, config_map['max_neg'], config_map['min_pos'],
         FEATURE_DIR, feature_list, FEATURE_STATS_FILE, config_map['min_date'],
         config_map['max_date'], config_map['feature_window'],
         config_map['min_feature_perc'], data_file, label_file, rlabel_file,
         meta_file, config_map['weight_power'], weight_file))
    util.run(cmd)
Example #57
def eslint():
    script = os.path.join(third_party_path, "node_modules", "eslint", "bin",
                          "eslint")
    # Find all *directories* in the main repo that contain .ts/.js files.
    source_files = get_sources(root_path, [
        "*.js",
        "*.ts",
        ":!:cli/tests/swc_syntax_error.ts",
        ":!:std/**/testdata/*",
        ":!:std/**/node_modules/*",
        ":!:cli/compilers/wasm_wrap.js",
        ":!:cli/tests/error_syntax.js",
        ":!:cli/tests/lint/**",
    ])
    if source_files:
        print_command("eslint", source_files)
        # Set NODE_PATH so we don't have to maintain a symlink in root_path.
        env = os.environ.copy()
        env["NODE_PATH"] = os.path.join(root_path, "third_party",
                                        "node_modules")
        run(["node", script, "--max-warnings=0", "--"] + source_files,
            shell=False,
            env=env,
            quiet=True)
Example #58
def test_rio_app_endpoint(nspc):
    image = "ibuildthecloud/demo:v1"
    image2 = "ibuildthecloud/demo:v3"

    srv = create_service(nspc, image)
    fullName = (f"{nspc}/{srv}")
    print(fullName)
    stage_service(image2, fullName, "v3")

    appEndpoint = get_app_info(fullName, "status.endpoints[0]")
    print(f"{appEndpoint}")

    results = util.run(f"curl -s {appEndpoint}")
    print(f"{results}")

    assert results == 'Hello World'
Example #59
    def up(self):
        self.vagrant('up')

        # Wait for VirtualBox tools to come online
        if not os.environ.get('VAGRANT_DEFAULT_PROVIDER') or os.environ.get(
                'VAGRANT_DEFAULT_PROVIDER') == 'virtualbox':
            # Wait for VirtualBox to restart
            time.sleep(30)

            while run('vagrant ssh --command "pgrep -f VBoxService"',
                      returncode=True,
                      echo=False):
                sys.stdout.write(".")
            print "."

        self.vagrant('reload')
Example #60
def main():
    args = parse_args()

    log.debug("Dockerfile: {}, context: {}, docker build args: '{}'".format(
        args.dockerfile, args.context, args.docker_build_args))

    use_container_registry = args.container_registry is not None

    if not use_container_registry:
        log.info("No container registry will be used")

    tag = generate_tag(args.dockerfile, args.context, args.docker_build_args)

    full_image_name = \
        "{}.azurecr.io/{}:{}".format(args.container_registry, args.repository, tag) \
        if use_container_registry else \
        "{}:{}".format(args.repository, tag)

    log.info("Image: {}".format(full_image_name))

    if use_container_registry and container_registry_has_image(
            full_image_name, args.docker_path):
        log.info("Pulling image...")
        run(args.docker_path, "pull", full_image_name)
    else:
        log.info("Building image...")
        run(args.docker_path, "build", "--pull",
            *shlex.split(args.docker_build_args),
            *shlex.split(args.docker_build_args_not_affecting_image_content),
            "--tag", full_image_name, "--file", args.dockerfile, args.context)

        if use_container_registry:
            # avoid pushing if an identically tagged image has been pushed since the last check
            # there is still a race condition, but this reduces the chance of a redundant push
            if not container_registry_has_image(full_image_name,
                                                args.docker_path):
                log.info("Pushing image...")
                run(args.docker_path, "push", full_image_name)
            else:
                log.info("Image now found, skipping push")

    # tag so we can refer to the image by repository name
    run(args.docker_path, "tag", full_image_name, args.repository)

    return 0