Example no. 1
    def init_adapter(self, cfg):
        if not cfg.get('adapter'):
            raise RuntimeError("Required parameter missing for ssh_wrapper: adapter")

        config = {
            'as': self._as,
            'config': cfg.get('adapter_config', {})
        }

        self._adapter = get_adapter(cfg['adapter'])(config)

        host = cfg.get('host', 'localhost')
        user = cfg.get('user')
        identity = cfg.get('identity')

        run = ssh.bake(host)

        if user:
            run = run.bake('-u {}'.format(user))

        if identity:
            run = run.bake('-i {}'.format(identity))

        self._adapter.run = run
Example no. 2
def send_to_matlab(model, averages):
    fd = tempfile.NamedTemporaryFile()
    fd.write('\n'.join('%s,%f' % a for a in averages))
    fd.flush()

    logger.debug('Sending query list and GoogleScores to fmedia13')
    scp(fd.name, 'fmedia13:/tmp/fludetector_google_matlab_input')
    fd.close()

    fmedia13 = ssh.bake('fmedia13')

    run = ';'.join([
        "fin='/tmp/fludetector_google_matlab_input'",
        "fout='/tmp/fludetector_google_matlab_output'",
        "cd /home/vlampos/website_v2",
        "run('gpml/startup.m')",
        "%s(fin,fout)" % model.get_data()['matlab_function'],
        "exit"])
    logger.debug('Running matlab function over scores')
    fmedia13('matlab', '-nodisplay', '-nojvm', '-r', '"%s"' % run)

    logger.debug('Reading matlab results back')
    value = float(fmedia13('cat', '/tmp/fludetector_google_matlab_output').strip())

    logger.debug('Cleaning up temp files')
    fmedia13('rm', '/tmp/fludetector_google_matlab_input', '/tmp/fludetector_google_matlab_output')

    return value
Example no. 3
def send_email(user, event, message, **kw):
  if user not in config['email']['user_emails']:
    return

  args = {
    'f': config['email']['from_address'],
    't': config['email']['user_emails'][user],
    'u': kw['subject'] if 'subject' in kw else 'Notification',
  }

  if 'app' not in kw:
    kw['app'] = config['default_app']

  body = HTML('html')
  tr = body.table().tr()
  tr.td(valign='top').img(src=config['icons'][kw['app']], style='float:left; margin: 15px')
  try:
    if 'email_body' in kw:
      tr.td().text(kw['email_body'], escape=False)
    else:
      getattr(notifications, event + '_email')(tr.td(), message, **kw)
  except:
    with tr.td().p(style='margin-top: 15px') as p:
      p.b("Message:")
      p.br()
      p.text(message)

  ip = curl('ifconfig.me').strip()
  if ip != config['ip']:
    ybdst = ssh.bake(config['ip'])
    print "Sent %s email to %s" % (event, user)
    return ybdst.sendemail(_in=str(body), **args)
  else:
    print "Sent %s email to %s" % (event, user)
    return sendemail(_in=str(body), **args)
Example no. 4
    def prepare(self):
        """Populate the environment of the process with the necessary values.

        In addition, it creates helper objects to run shell and SSH processes.
        """
        # Populate environment with required variables
        os.environ["HS2_HOST_PORT"] = self.config["hs2"]
        os.environ["HDFS_NN"] = self.config["namenode"]
        os.environ["IMPALAD"] = self.config["impalad"][0]
        os.environ["REMOTE_LOAD"] = "1"
        os.environ["HADOOP_USER_NAME"] = "hdfs"
        os.environ["TEST_WAREHOUSE_DIR"] = self.config["hive_warehouse_directory"]
        os.environ["KUDU_MASTER"] = self.config["kudu_master"]

        if self.options.snapshot_file is None:
            if "SNAPSHOT_DIR" in os.environ:
                snapshot_dir = os.environ["SNAPSHOT_DIR"]
            else:
                snapshot_dir = "{0}/testdata/test-warehouse-SNAPSHOT".format(
                    os.getenv("WORKSPACE"))
            if not os.path.isdir(snapshot_dir):
                err_msg = 'Snapshot directory "{0}" is not a valid directory'
                logger.error(err_msg.format(snapshot_dir))
                raise OSError("Could not find test-warehouse snapshot file.")

            logger.info("Snapshot directory: {0}".format(snapshot_dir))
            self.snapshot_file = self.find_snapshot_file(snapshot_dir)
        else:
            self.snapshot_file = self.options.snapshot_file

        # Prepare shortcuts for connecting to remote services
        self.gtw_ssh = ssh.bake("{0}@{1}".format(self.options.ssh_user, self.gateway),
                                "-oStrictHostKeyChecking=no",
                                "-oUserKnownHostsFile=/dev/null",
                                t=True, _out=tee, _err=tee)

        self.beeline = sh.beeline.bake(silent=False, outputformat="csv2", n="impala",
                                       u="jdbc:hive2://{0}/default".format(
                                           self.config["hs2"]))

        self.load_test_warehouse = sh.Command(
            "{0}/testdata/bin/load-test-warehouse-snapshot.sh".format(
                self.impala_home)).bake(
            _out=tee, _err=tee)

        self.create_load_data = sh.Command(
            "{0}/testdata/bin/create-load-data.sh".format(self.impala_home))

        self.main_impalad = self.config["impalad"][0]
        self.impala_shell = sh.Command("impala-shell.sh").bake(i=self.main_impalad,
                                                               _out=tee, _err=tee)

        self.python = sh.Command("impala-python").bake(u=True)
        self.compute_stats = sh.Command(
            "{0}/testdata/bin/compute-table-stats.sh".format(self.impala_home)).bake(
            _out=tee, _err=tee)
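
A minimal sketch of the calling side, assuming prepare() above has completed ("loader" stands for an instance of this class; the HDFS path and query are made up):

loader.gtw_ssh("hdfs", "dfs", "-ls", "/tmp")      # runs on the gateway over ssh, output teed
loader.load_test_warehouse(loader.snapshot_file)  # load-test-warehouse-snapshot.sh <snapshot>
loader.impala_shell(q="select 1")                 # impala-shell.sh -i <impalad> -q 'select 1'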
Example no. 5
    def conexion_ssh(self):
        self.logger.debug(self)
        login = self.creds['user']+"@"+self.creds['ip']

        if self.dsa_key is False:
            sp = self._sshpass()
            conexion = sp.bake("ssh", login)
        else:
            conexion = ssh.bake(login)
        return conexion
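
The _sshpass() helper is not shown in this snippet; presumably it wraps the sshpass binary so the password can be supplied non-interactively. A hypothetical reconstruction (the 'password' credential field is an assumption):

import sh

def _sshpass(self):
    # hypothetical: bake sshpass with the stored password so that
    # sp.bake("ssh", login) above resolves to "sshpass -p <pw> ssh user@ip"
    return sh.sshpass.bake('-p', self.creds['password'])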
Example no. 6
    def ssh(self, **kwargs):
        """
        Return a pre-baked ssh client command for running commands on
        the remote server. Each command invocation opens a separate
        connection.

        :return: sh.Command
        """
        return ssh.bake(self.ip(), '-A', '-t', i=self.ssh_key(), l='vagrant',
                        o='StrictHostKeyChecking no', **kwargs)
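
Each call on the returned command opens its own connection; a usage sketch (the VagrantBox class name and the commands run are assumptions):

machine = VagrantBox()      # hypothetical instance of the class above
run = machine.ssh()
print(run('uptime'))        # first connection: ssh <ip> -A -t -i <key> -l vagrant ... uptime
print(run('whoami'))        # a second, independent connection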
Example no. 7
def create_vm(vm_name_prefix, vm_image):
    # _ is not valid in a vm name
    vm_name = '{}-{}'.format(vm_name_prefix.replace('_', '-'), RESOURCE_GROUP)
    az(['vm', 'create', '-g', RESOURCE_GROUP, '-n', vm_name, '--authentication-type', 'ssh', '--image', vm_image, '--admin-username', VM_USERNAME])
    io = StringIO()
    az(['vm', 'list-ip-addresses', '--resource-group', RESOURCE_GROUP, '--name', vm_name, '--query', '[0].virtualMachine.network.publicIpAddresses[0].ipAddress'], _out=io)
    vm_ip_address = io.getvalue().strip().replace('"', '')
    io.close()
    vm = ssh.bake(['-oStrictHostKeyChecking=no', '-tt', "{}@{}".format(VM_USERNAME, vm_ip_address)])
    return vm
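
The returned vm object is callable like the my_vm helpers in later examples; a usage sketch (the image alias is an assumption):

import sys

vm = create_vm('smoke', 'UbuntuLTS')    # 'UbuntuLTS' is a hypothetical image alias
vm(['uname', '-a'], _out=sys.stdout)    # ssh -oStrictHostKeyChecking=no -tt <user>@<ip> uname -a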
Example no. 8
 def __init__(self, hpcCfg):
     """Initialize connection object."""
     self.logger = getLogger(__name__)
     assert hpcCfg is not None
     self.hpcConfig = hpcCfg
     self.logger.debug("SSH properties\n host: %s,\n user: %s,\n port: %s",
                       self.hpcConfig.get_value('host'),
                       self.hpcConfig.get_value('user_name'),
                       self.hpcConfig.get_value('ssh_port'))
     try:
         # bake every option into a single command; separate ssh.bake()
         # calls would each discard the previously baked options
         self.ssh_conn = ssh.bake('-n',
                                  '-l', self.hpcConfig.get_value('user_name'),
                                  '-p', self.hpcConfig.get_value('ssh_port'),
                                  '-i', self.hpcConfig.get_value('ssh_key'),
                                  self.hpcConfig.get_value('host'))
         # enforce desired log level
         muteSH()
         getLogger("sh.command").setLevel(WARNING)
     except ErrorReturnCode as e:
         self.logger.error('SSH initialization failed:\n{}'.format(
             e.stderr))
         sys.exit(1)
Example no. 9
 def __init__(self, host, userhost='web', timeout=60):
     log = logger("Server configuration ")
     # self.user =  UserConfig()
     if host:
         log.info('Configuring host')
         host_ssh = '%s@%s' % (userhost, host['ip'])
         self.ssh = ssh.bake(host_ssh, '-p', host['port'] or '22', '-A',
                             '-o', 'UserKnownHostsFile=/dev/null', '-o',
                             'StrictHostKeyChecking=no', '-o',
                             'BatchMode=yes', '-o',
                             'PasswordAuthentication=no', '-o',
                             'ConnectTimeout=%s' % timeout)
     else:
         log.error('No host to deploy')
         exit(0)
Example no. 10
def test(index):
    name = generate_name(index)
    result = fgrep(nova.show(name), 'network')

    (start, label, ips, rest) = result.replace(' ', '').split('|')
    print ips
    try:
        (private_ip, public_ip) = ips.split(',')
    except:
        print 'public IP is not yet assigned'
        sys.exit()
    print public_ip
    rsh = ssh.bake('%s@%s' % ('ubuntu', str(public_ip)))
    remote = rsh.bake('-o StrictHostKeyChecking no')
    test1 = remote('uname').replace('\n', '')
    print '<%s>' % test1
    print remote('uname -a')
    print remote('hostname')
    print remote('pwd')
Example no. 11
def test(index):
    name = generate_name(index)
    result = fgrep(nova.show(name), 'network')

    (start, label, ips, rest) = result.replace(' ', '').split('|')
    print ips
    try:
        (private_ip, public_ip) = ips.split(',')
    except:
        print 'public IP is not yet assigned'
        sys.exit()
    print public_ip
    rsh = ssh.bake('%s@%s' % ('ubuntu', str(public_ip)))
    remote = rsh.bake('-o StrictHostKeyChecking no')
    test1 = remote('uname').replace('\n', '')
    print '<%s>' % test1
    print remote('uname -a')
    print remote('hostname')
    print remote('pwd')
Example no. 12
    def init_adapter(self, cfg):
        if not cfg.get('adapter'):
            raise RuntimeError(
                "Required parameter missing for ssh_wrapper: adapter")

        config = {'as': self._as, 'config': cfg.get('adapter_config', {})}

        self._adapter = get_adapter(cfg['adapter'])(config)

        host = cfg.get('host', 'localhost')
        user = cfg.get('user')
        identity = cfg.get('identity')

        run = ssh.bake(host)

        if user:
            run = run.bake('-u {}'.format(user))

        if identity:
            run = run.bake('-i {}'.format(identity))

        self._adapter.run = run
Example no. 13
File: log.py Project: weapp/miner
    def extract(self, conf):
        merge = conf.get("should_line_merge", False)
        before = conf.get("break_only_before", None)
        if merge:
            before = re.compile(before)
        
        value = HashBuilder({
                "type": conf.get("type", "log"),
                "source_path": conf["path"],
                "tags": conf.get("tags", [])
            })

        if conf.get("host"):
            host = "%s@%s" % (conf["user"], conf["host"]) if conf.get("user") else conf["host"]
            remote_server = ssh.bake(host)
            mac = str(ssh(host, "ifconfig | grep 'eth0'"))
            value.shared["source_host"] = host
            value.shared["source_mac"] = re.findall("..:..:..:..:..:..", mac)[0]
            log = remote_server.tail("-f", conf["path"], _iter=True)
        else:
            touch(conf["path"])
            if conf.get("from", None) == "now":
                filesize = "-c0"
            else:
                filesize = "-c%s" % (int(stat("-nf" ,'%z', conf["path"])) * 2 + 10)
            log = tail("-F", filesize, conf["path"], _iter=True)

        for n, line in enumerate(log):
            if merge:
                if before.match(line):
                    if value.has_data():
                        yield value.dict()
                        # time.sleep(0.1)
                    value.build()
                value.log(line)
            else:
                value.build(line)
                yield value.dict()
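
The remote branch above relies on sh's _iter=True, which turns the running command into a line iterator; a minimal standalone sketch (host and path are hypothetical):

from sh import ssh

remote = ssh.bake("deploy@loghost")    # hypothetical host
# _iter=True yields output line by line instead of blocking until exit,
# which is what lets extract() follow a remote log indefinitely.
for line in remote.tail("-f", "/var/log/app.log", _iter=True):
    print(line, end="")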
Example no. 14
def create_vm(vm_name_prefix, vm_image):
    # _ is not valid in a vm name
    vm_name = '{}-{}'.format(vm_name_prefix.replace('_', '-'), RESOURCE_GROUP)
    vm_name = hashlib.md5(vm_name.encode()).hexdigest()[:10]
    az([
        'vm', 'create', '-g', RESOURCE_GROUP, '-n', vm_name,
        '--authentication-type', 'ssh', '--image', vm_image,
        '--admin-username', VM_USERNAME, '--deployment-name', vm_name
    ])
    io = StringIO()
    az([
        'vm', 'list-ip-addresses', '--resource-group', RESOURCE_GROUP,
        '--name', vm_name, '--query',
        '[0].virtualMachine.network.publicIpAddresses[0].ipAddress'
    ],
       _out=io)
    vm_ip_address = io.getvalue().strip().replace('"', '')
    io.close()
    vm = ssh.bake([
        '-oStrictHostKeyChecking=no', '-tt',
        "{}@{}".format(VM_USERNAME, vm_ip_address)
    ])
    return vm
Example no. 15
def send_email(user, event, message, **kw):
    if user not in config['email']['user_emails']:
        return

    args = {
        'f': config['email']['from_address'],
        't': config['email']['user_emails'][user],
        'u': kw['subject'] if 'subject' in kw else 'Notification',
    }

    if 'app' not in kw:
        kw['app'] = config['default_app']

    body = HTML('html')
    tr = body.table().tr()
    tr.td(valign='top').img(src=config['icons'][kw['app']],
                            style='float:left; margin: 15px')
    try:
        if 'email_body' in kw:
            tr.td().text(kw['email_body'], escape=False)
        else:
            getattr(notifications, event + '_email')(tr.td(), message, **kw)
    except:
        with tr.td().p(style='margin-top: 15px') as p:
            p.b("Message:")
            p.br()
            p.text(message)

    ip = curl('ifconfig.me').strip()
    if ip != config['ip']:
        ybdst = ssh.bake(config['ip'])
        print "Sent %s email to %s" % (event, user)
        return ybdst.sendemail(_in=str(body), **args)
    else:
        print "Sent %s email to %s" % (event, user)
        return sendemail(_in=str(body), **args)
Example no. 16
File: swarm.py Project: bholt/ipa
MASTER = 'platypus'
AGENTS = ['sloth', 'rhinoceros']

DOCKER_PORT = 2376
SWARM_PORT = 4000
BRIDGE = 'swarm'

CONSUL = '10.100.1.17'
CONSUL_PORT = 8500
CONSUL_LOG = '/var/log/consul'

NETWORK = 'owl'

hosts = [MASTER] + AGENTS
machines = [ ssh.bake(host) for host in hosts ]

consul = "/homes/sys/bholt/bin/consul"

swarm_url = fmt("tcp://#{MASTER}:#{SWARM_PORT}")

swarm  = docker.bake(host=swarm_url)
master = ssh.bake(MASTER)


LIVE = {'_out': sys.stdout, '_err': sys.stderr}


def docker(host):
    return sh.docker.bake("--host=tcp://{}:{}".format(host, DOCKER_PORT))
Example no. 17
from sh import git, ssh


branch = git.bake('branch')
myssh = ssh.bake('username@hostname')

Example no. 18
        reconfigure_targets(target1, target2, status)

    if "TCP_big" in TESTS:
        server_cmd = """ncat -6 -e /bin/cat -k -l -p 3333"""

        s = server(server_cmd, _bg=True)

        time.sleep(1)  # wait for the server to be ready

        for addr in TARGET1_ADDR:
            status = tcp_tests(client, 'b' * 1000, addr, 3333)
            write_report(f, status, "TCP test on %s (port 3333 - payload size 1000)" % addr)
        s.terminate()
        # ugly but necessary
        server("pkill ncat")

        status = targets_alive(target1, target2)
        write_report(f, status, "status after TCP test (payload size 1000) on port 3333", crash=True)
        reconfigure_targets(target1, target2, status)

    print "a report has been written in 'report.txt'"
    f.close()

if __name__ == "__main__":
    if len(sys.argv) != 3:
        usage()
        sys.exit(-1)

    ssh = ssh.bake("-lroot")
    run_test(sys.argv[1], sys.argv[2])
Example no. 19
def run_test(target1, target2):
    print "starting regression script for the Linux 6LoWPAN stack"
    if not all(targets_alive(target1, target2)):
        print "exiting test application"
        sys.exit(-1)
    print "%s and %s both seem reachable, good!" % (target1, target2)

    # test if password-less ssh login is possible
    ret = ssh(target1, "echo test1234", _out=ssh_interact, _out_bufsize=0, _tty=True)
    ret.wait()
    if ret.exit_code == -9:
        sys.exit(-1)
    ret = ssh(target2, "echo test1234", _out=ssh_interact, _out_bufsize=0, _tty=True)
    ret.wait()
    if ret.exit_code == -9:
        sys.exit(-1)

    f = open("report.txt", 'w')
    f.write("test, server, client\n")

    configure_targets(target1, target2)

    status = targets_alive(target1, target2)
    write_report(f, status, "setting addresses", crash=True)
    if not all(status):
        print "unable to properly set the IPv6 addresses, "\
              "the regression test will stop here"
        sys.exit(-1)

    server = ssh.bake("-o TCPKeepAlive=yes", "-o ServerAliveInterval=10", target1)
    client = ssh.bake("-o TCPKeepAlive=yes", "-o ServerAliveInterval=10", target2)

    # ICMP tests
    print "running ICMPv6 tests"
    if "ICMPv6_reg" in TESTS:
        for addr in TARGET1_ADDR:
            res = client.ping6(addr, I=TARGET1_IF, c=3, _out=False)
            if res.exit_code != 0:
                status = (False, False)
            else:
                status = (True, True)
            write_report(f, status, "(regular) ping6 on %s" % addr)
        status = targets_alive(target1, target2)
        write_report(f, status, "status after (regular) ping6 test", crash=True)
        reconfigure_targets(target1, target2, status)

    if "ICMPv6_large" in TESTS:
        for addr in TARGET1_ADDR:
            res = client.ping6(addr, I=TARGET1_IF, c=3, s=700, _out=False)
            if res.exit_code != 0:
                status = (False, False)
            else:
                status = (True, True)
            write_report(f, status, "(large) ping6 on %s" % addr)

        status = targets_alive(target1, target2)
        write_report(f, status, "status after (large) ping6 test", crash=True)
        reconfigure_targets(target1, target2, status)

    # UDP tests
    print "running UDP tests"
    if "UDP_small_low_port" in TESTS:
        server_cmd = """ncat -6 -e /bin/cat -u -k -l -p 4444"""

        s = server(server_cmd, _bg=True)

        for addr in TARGET1_ADDR:
            status = udp_tests(client, 'a' * 64, addr, 4444)
            write_report(f, status, "UDP test on %s (port 4444 - payload size 64)" % addr)
        s.terminate()
        # ugly but necessary
        server("pkill ncat")

        status = targets_alive(target1, target2)
        write_report(f, status, "status after small UDP test (payload size 64) on port 4444", crash=True)
        reconfigure_targets(target1, target2, status)

    if "UDP_small_high_port" in TESTS:
        server_cmd = """ncat -6 -e /bin/cat -u -k -l -p 61617"""

        s = server(server_cmd, _bg=True)

        for addr in TARGET1_ADDR:
            status = udp_tests(client, 'a' * 64, addr, 61617)
            write_report(f, status, "UDP test on %s (port 61617 - payload size 64)" % addr)
        s.terminate()
        # ugly but necessary
        server("pkill ncat")

        status = targets_alive(target1, target2)
        write_report(f, status, "status after small UDP test (payload size 64) on port 61617", crash=True)
        reconfigure_targets(target1, target2, status)

    if "UDP_large" in TESTS:
        server_cmd = """ncat -6 -e /bin/cat -u -k -l -p 4444"""

        s = server(server_cmd, _bg=True)

        for addr in TARGET1_ADDR:
            status = udp_tests(client, 'a' * 700, addr, 4444)
            write_report(f, status, "UDP test on %s (port 4444 - payload size 700)" % addr)
        s.terminate()
        # ugly but necessary
        server("pkill ncat")

        status = targets_alive(target1, target2)
        write_report(f, status, "status after UDP test (payload size 700) on port 61617", crash=True)
        reconfigure_targets(target1, target2, status)

    # TCP tests
    print "running TCP tests"
    if "TCP_small" in TESTS:
        server_cmd = """ncat -6 -e /bin/cat -k -l -p 3333"""

        s = server(server_cmd, _bg=True)

        time.sleep(1)

        for addr in TARGET1_ADDR:
            status = tcp_tests(client, 'b' * 64, addr, 3333)
            write_report(f, status, "TCP test on %s (payload size 64)" % addr)
        s.terminate()
        # ugly but necessary
        server("pkill ncat")

        status = targets_alive(target1, target2)
        write_report(f, status, "status after TCP test (payload size 64) on port 3333", crash=True)
        reconfigure_targets(target1, target2, status)

    if "TCP_big" in TESTS:
        server_cmd = """ncat -6 -e /bin/cat -k -l -p 3333"""

        s = server(server_cmd, _bg=True)

        time.sleep(1)  # wait for the server to be ready

        for addr in TARGET1_ADDR:
            status = tcp_tests(client, 'b' * 1000, addr, 3333)
            write_report(f, status, "TCP test on %s (port 3333 - payload size 1000)" % addr)
        s.terminate()
        # ugly but necessary
        server("pkill ncat")

        status = targets_alive(target1, target2)
        write_report(f, status, "status after TCP test (payload size 1000) on port 3333", crash=True)
        reconfigure_targets(target1, target2, status)

    print "a report has been written in 'report.txt'"
    f.close()
Example no. 20
def main():
    print_env_vars()
    time_str = datetime.utcnow().strftime('%Y%m%d%H%M%S')
    az(["login"], _out=sys.stdout, _err=sys.stdout)
    resource_group = 'azurecli-release-debian-' + time_str
    vm_name = 'vm-debian-' + time_str
    print_status('Creating resource group.')
    az(['group', 'create', '-l', 'westus', '-n', resource_group], _out=sys.stdout, _err=sys.stdout)
    print_status('Creating VM.')
    az(['vm', 'create', '-g', resource_group, '-n', vm_name, '--generate-ssh-keys', '--authentication-type', 'ssh',
        '--image', 'Canonical:UbuntuServer:14.04.4-LTS:latest', '--admin-username', 'ubuntu'],
       _out=sys.stdout, _err=sys.stdout)
    io = StringIO()
    print_status('Getting VM IP address.')
    az(['vm', 'list-ip-addresses', '--resource-group', resource_group, '--name', vm_name,
        '--query', '[0].virtualMachine.network.publicIpAddresses[0].ipAddress'], _out=io)
    ip_address = io.getvalue().strip().replace('"', '')
    print_status('VM IP address is {}'.format(ip_address))
    io.close()
    vm_connect_str = "ubuntu@{}".format(ip_address)
    my_vm = ssh.bake(['-oStrictHostKeyChecking=no', vm_connect_str])
    print_status('Installing git.')
    my_vm(['sudo', 'apt-get', 'update', '&&', 'sudo', 'apt-get', 'install', '-y', 'git'],
          _out=sys.stdout, _err=sys.stdout)
    io = StringIO()
    my_vm(['mktemp', '-d'], _out=io)
    repo_dir = io.getvalue().strip()
    io.close()
    print_status('Cloning repo.')
    my_vm(['git', 'clone', 'https://github.com/{}'.format(script_env.get('REPO_NAME')), repo_dir], _out=sys.stdout, _err=sys.stdout)
    path_to_debian_build_script = os.path.join(repo_dir, 'packaged_releases', 'debian', 'debian_build.sh')
    path_to_dir_creator = os.path.join(repo_dir, 'packaged_releases', 'debian', 'debian_dir_creator.sh')
    io = StringIO()
    my_vm(['mktemp', '-d'], _out=io)
    build_artifact_dir = io.getvalue().strip()
    io.close()
    print_status('Running debian build scripts.')
    my_vm(['chmod', '+x', path_to_debian_build_script, path_to_dir_creator], _out=sys.stdout, _err=sys.stdout)
    my_vm(['export', 'CLI_VERSION={}'.format(script_env.get('CLI_VERSION')), '&&',
           'export', 'CLI_DOWNLOAD_SHA256={}'.format(script_env.get('CLI_DOWNLOAD_SHA256')), '&&',
           'export', 'BUILD_ARTIFACT_DIR={}'.format(build_artifact_dir), '&&',
           path_to_debian_build_script, path_to_dir_creator],
          _out=sys.stdout, _err=sys.stdout)
    print_status('Debian build complete.')
    io = StringIO()
    my_vm(['ls', build_artifact_dir], _out=io)
    artifact_name = io.getvalue().strip()
    io.close()
    deb_file_path = os.path.join(build_artifact_dir, artifact_name)
    print_status('Installing the .deb on the build machine')
    my_vm(['sudo', 'dpkg', '-i', deb_file_path], _out=sys.stdout, _err=sys.stdout)
    # Upload to Azure Storage
    print_status('Uploading .deb to Azure storage.')
    my_vm(['az', 'storage', 'container', 'create', '--name', 'repos', '--public-access', 'blob',
           '--connection-string', '"{}"'.format(script_env.get('AZURE_STORAGE_CONNECTION_STRING'))],
          _out=sys.stdout, _err=sys.stdout)
    my_vm(['az', 'storage', 'blob', 'upload', '-f', deb_file_path,
           '-n', artifact_name, '-c', 'repos', '--connection-string', '"{}"'.format(script_env.get('AZURE_STORAGE_CONNECTION_STRING'))],
          _out=sys.stdout, _err=sys.stdout)
    io = StringIO()
    my_vm(['az', 'storage', 'blob', 'url', '-n', artifact_name, '-c', 'repos', '--output', 'tsv',
           '--connection-string', '"{}"'.format(script_env.get('AZURE_STORAGE_CONNECTION_STRING'))], _out=io)
    deb_url = io.getvalue().strip()
    io.close()
    print_status('Debian file uploaded to the following URL.')
    print_status(deb_url)
    # Publish to apt service
    my_vm(['wget', '-q', 'https://bootstrap.pypa.io/get-pip.py'], _out=sys.stdout, _err=sys.stdout)
    my_vm(['sudo', 'python', 'get-pip.py'], _out=sys.stdout, _err=sys.stdout)
    my_vm(['sudo', 'pip', 'install', '--upgrade', 'requests'], _out=sys.stdout, _err=sys.stdout)
    upload_script = REPO_UPLOAD_SCRIPT_TMPL.format(cli_version=script_env.get('CLI_VERSION'),
                                                   repo_id=script_env.get('DEBIAN_REPO_ID'),
                                                   source_url=deb_url,
                                                   repo_package_url=script_env.get('MS_REPO_URL'),
                                                   repo_user=script_env.get('MS_REPO_USERNAME'),
                                                   repo_pass=script_env.get('MS_REPO_PASSWORD'))
    my_vm(['echo', '-e', '"{}"'.format(upload_script), '>>', 'repo_upload.py'], _out=sys.stdout, _err=sys.stdout)
    my_vm(['python', 'repo_upload.py'], _out=sys.stdout, _err=sys.stdout)
    print_status('Done. :)')
    give_chance_to_cancel('Delete resource group (in background)')
    az(['group', 'delete', '--name', resource_group, '--yes', '--no-wait'], _out=sys.stdout, _err=sys.stdout)
    print_status('Finished. :)')
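
Passing '&&' and '>>' as plain list items works because ssh joins everything after the destination into a single string and hands it to the remote shell; a minimal illustration with the my_vm helper above:

my_vm(['mkdir', '-p', '/tmp/x', '&&', 'ls', '/tmp'])
# the remote shell executes: mkdir -p /tmp/x && ls /tmp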
Example no. 21
    def prepare(self):
        """Populate the environment of the process with the necessary values.

        In addition, it creates helper objects to run shell and SSH processes.
        """
        # Populate environment with required variables
        os.environ["HS2_HOST_PORT"] = self.config["hs2"]
        os.environ["HDFS_NN"] = self.config["namenode"]
        os.environ["IMPALAD"] = self.config["impalad"][0]
        os.environ["REMOTE_LOAD"] = "1"
        os.environ["HADOOP_USER_NAME"] = "hdfs"
        os.environ["TEST_WAREHOUSE_DIR"] = self.config[
            "hive_warehouse_directory"]
        os.environ["KUDU_MASTER"] = self.config["kudu_master"]

        if self.options.snapshot_file is None:
            if "SNAPSHOT_DIR" in os.environ:
                snapshot_dir = os.environ["SNAPSHOT_DIR"]
            else:
                snapshot_dir = "{0}/testdata/test-warehouse-SNAPSHOT".format(
                    os.getenv("WORKSPACE"))
            if not os.path.isdir(snapshot_dir):
                err_msg = 'Snapshot directory "{0}" is not a valid directory'
                logger.error(err_msg.format(snapshot_dir))
                raise OSError("Could not find test-warehouse snapshot file.")

            logger.info("Snapshot directory: {0}".format(snapshot_dir))
            self.snapshot_file = self.find_snapshot_file(snapshot_dir)
        else:
            self.snapshot_file = self.options.snapshot_file

        # Prepare shortcuts for connecting to remote services
        self.gtw_ssh = ssh.bake("{0}@{1}".format(self.options.ssh_user,
                                                 self.gateway),
                                "-oStrictHostKeyChecking=no",
                                "-oUserKnownHostsFile=/dev/null",
                                t=True,
                                _out=tee,
                                _err=tee)

        self.beeline = sh.beeline.bake(silent=False,
                                       outputformat="csv2",
                                       n="impala",
                                       u="jdbc:hive2://{0}/default".format(
                                           self.config["hs2"]))

        self.load_test_warehouse = sh.Command(
            "{0}/testdata/bin/load-test-warehouse-snapshot.sh".format(
                self.impala_home)).bake(_out=tee, _err=tee)

        self.create_load_data = sh.Command(
            "{0}/testdata/bin/create-load-data.sh".format(self.impala_home))

        self.main_impalad = self.config["impalad"][0]
        self.impala_shell = sh.Command("impala-shell.sh").bake(
            i=self.main_impalad, _out=tee, _err=tee)

        self.python = sh.Command("impala-python").bake(u=True)
        self.compute_stats = sh.Command(
            "{0}/testdata/bin/compute-table-stats.sh".format(
                self.impala_home)).bake(_out=tee, _err=tee)
Example no. 22
def main():
    print_env_vars()
    time_str = datetime.utcnow().strftime('%Y%m%d%H%M%S')
    az(["login"], _out=sys.stdout, _err=sys.stdout)
    resource_group = 'azurecli-release-rpm-' + time_str
    vm_name = 'vm-rpm-' + time_str
    print_status('Creating resource group.')
    az(['group', 'create', '-l', 'westus', '-n', resource_group], _out=sys.stdout, _err=sys.stdout)
    print_status('Creating VM.')
    az(['vm', 'create', '-g', resource_group, '-n', vm_name, '--generate-ssh-keys', '--authentication-type', 'ssh',
        '--image', 'OpenLogic:CentOS:7.3:latest', '--admin-username', 'myuser'],
       _out=sys.stdout, _err=sys.stdout)
    io = StringIO()
    print_status('Getting VM IP address.')
    az(['vm', 'list-ip-addresses', '--resource-group', resource_group, '--name', vm_name,
        '--query', '[0].virtualMachine.network.publicIpAddresses[0].ipAddress'], _out=io)
    ip_address = io.getvalue().strip().replace('"', '')
    print_status('VM IP address is {}'.format(ip_address))
    io.close()
    vm_connect_str = "myuser@{}".format(ip_address)
    my_vm = ssh.bake(['-oStrictHostKeyChecking=no', vm_connect_str])
    print_status('Installing git.')
    build_prereqs = "sudo yum update -y && sudo yum install -y git gcc rpm-build rpm-devel rpmlint make python bash coreutils " \
                    "diffutils patch rpmdevtools python libffi-devel python-devel openssl-devel"
    my_vm(build_prereqs.split(),
          _out=sys.stdout, _err=sys.stdout)
    my_vm("mkdir -p ~/rpmbuild/BUILD ~/rpmbuild/RPMS ~/rpmbuild/SOURCES ~/rpmbuild/SPECS ~/rpmbuild/SRPMS".split(), _out=io)
    io = StringIO()
    my_vm(['mktemp', '-d'], _out=io)
    repo_dir = io.getvalue().strip()
    io.close()
    print_status('Cloning repo.')
    my_vm(['git', 'clone', 'https://github.com/{}'.format(script_env.get('REPO_NAME')), repo_dir], _out=sys.stdout, _err=sys.stdout)
    path_to_spec_file = os.path.join(repo_dir, 'packaged_releases', 'rpm', 'azure-cli.spec')
    print_status('Running build script.')
    my_vm(['export', 'CLI_VERSION={}'.format(script_env.get('CLI_VERSION')), '&&',
           'export', 'CLI_DOWNLOAD_SHA256={}'.format(script_env.get('CLI_DOWNLOAD_SHA256')), '&&',
           'rpmbuild', '-v', '-bb', '--clean', path_to_spec_file],
          _out=sys.stdout, _err=sys.stdout)
    print_status('Build complete.')
    io = StringIO()
    my_vm(['ls', '~/rpmbuild/RPMS/*/*'], _out=io)
    rpm_file_path = io.getvalue().strip()
    io.close()
    artifact_name = rpm_file_path.split('/')[-1]
    print_status('Installing the .rpm on the build machine')
    my_vm(['sudo', 'rpm', '-i', rpm_file_path], _out=sys.stdout, _err=sys.stdout)
    # Upload to Azure Storage
    print_status('Uploading .rpm to Azure storage.')
    my_vm(['az', 'storage', 'container', 'create', '--name', 'rpms', '--public-access', 'blob',
           '--connection-string', '"{}"'.format(script_env.get('AZURE_STORAGE_CONNECTION_STRING'))],
          _out=sys.stdout, _err=sys.stdout)
    my_vm(['az', 'storage', 'blob', 'upload', '-f', rpm_file_path,
           '-n', artifact_name, '-c', 'rpms', '--connection-string', '"{}"'.format(script_env.get('AZURE_STORAGE_CONNECTION_STRING'))],
          _out=sys.stdout, _err=sys.stdout)
    io = StringIO()
    my_vm(['az', 'storage', 'blob', 'url', '-n', artifact_name, '-c', 'rpms', '--output', 'tsv',
           '--connection-string', '"{}"'.format(script_env.get('AZURE_STORAGE_CONNECTION_STRING'))], _out=io)
    rpm_url = io.getvalue().strip()
    io.close()
    print_status('RPM file uploaded to the following URL.')
    print_status(rpm_url)
    # Publish to service
    my_vm(['wget', '-q', 'https://bootstrap.pypa.io/get-pip.py'], _out=sys.stdout, _err=sys.stdout)
    my_vm(['sudo', 'python', 'get-pip.py'], _out=sys.stdout, _err=sys.stdout)
    my_vm(['sudo', 'pip', 'install', '--upgrade', 'requests'], _out=sys.stdout, _err=sys.stdout)
    upload_script = REPO_UPLOAD_SCRIPT_TMPL.format(cli_version=script_env.get('CLI_VERSION'),
                                                   repo_id=script_env.get('YUM_REPO_ID'),
                                                   source_url=rpm_url,
                                                   repo_package_url=script_env.get('MS_REPO_URL'),
                                                   repo_user=script_env.get('MS_REPO_USERNAME'),
                                                   repo_pass=script_env.get('MS_REPO_PASSWORD'))
    my_vm(['echo', '-e', '"{}"'.format(upload_script), '>>', 'repo_upload.py'], _out=sys.stdout, _err=sys.stdout)
    # Keeping this code commented for when we can automate the signing of RPM packages.
    # my_vm(['python', 'repo_upload.py'], _out=sys.stdout, _err=sys.stdout)
    print_status('PRINTING OUT REPO UPLOAD SCRIPT AS THE UNSIGNED RPM NEEDS TO BE FIRST SIGNED BEFORE UPLOADING...')
    my_vm(['cat', 'repo_upload.py'], _out=sys.stdout, _err=sys.stdout)
    print_status('Done. :)')
    give_chance_to_cancel('Delete resource group (in background)')
    az(['group', 'delete', '--name', resource_group, '--yes', '--no-wait'], _out=sys.stdout, _err=sys.stdout)
    print_status('Finished. :)')
Example no. 23
#coding=utf8
from sh import ls
from sh import ssh
#Baking


ls = ls.bake("-la")
print(ls)

# resolves to "ls -la /"
print(ls("/"))

# calling ifconfig on the server
iam1 = ssh("[email protected]", "-p 22", "ifconfig")
print(iam1)

# resolves to "ssh [email protected] -p 22 whoami"
myserver = ssh.bake("[email protected]", p=22)
print(myserver)

iam2 = myserver.whoami()
print(iam2)


# resolves to "sh [email protected] -p 22  tail /root/InterfacePCTest/logs/test-account.log  -n 100"
iam3 = myserver.tail("/root/InterfacePCTest/logs/test-account.log", n=100)
print(repr(iam3))
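
bake() calls can also be chained, as the rsh/remote two-step bake in example no. 10 shows; each call freezes more arguments (the host below is an assumption):

base = ssh.bake("user@myserver.example", p=22)
hardened = base.bake("-o", "StrictHostKeyChecking=no", "-o", "BatchMode=yes")
print(hardened("hostname"))   # appends "hostname" to the frozen argument list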
Example no. 24
import json
from datetime import datetime
from io import StringIO
from textwrap import dedent
from uuid import uuid4

from everett.manager import parse_env_file
from sh import pushd, ssh, ErrorReturnCode
from sh.contrib import git

from webhook_deploy import settings

SSH_CONNECT_STRING = f'{settings.SSH_DOKKU_USER}@{settings.SSH_DOKKU_HOST}'

dokku = ssh.bake('-tp', settings.SSH_DOKKU_PORT, SSH_CONNECT_STRING, '--')


def apps_list():
    apps = StringIO(str(dokku('apps:list')))
    return [app.strip() for app in apps if not app.startswith('===')]


def apps_create(app_name):
    try:
        dokku('apps:create', app_name)
    except ErrorReturnCode:
        # app already exists
        pass


def apps_destroy(app_name):
Example no. 25
File: swarm.py Project: bholt/ipa
def on(host):
    return ssh.bake(host)
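
Such a one-liner is then used as, for instance (MASTER comes from the swarm constants in example no. 16):

print(on(MASTER)('docker', 'ps'))   # runs `docker ps` on the master over ssh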
Example no. 26
 def __init__(self, user, server):
     self.ssh = ssh.bake('{}@{}'.format(user, server))
Example no. 27
def main():
    print_env_vars()
    time_str = datetime.utcnow().strftime('%Y%m%d%H%M%S')
    az(["login"], _out=sys.stdout, _err=sys.stdout)
    resource_group = 'azurecli-release-docker-' + time_str
    vm_name = 'vm-docker-' + time_str
    print_status('Creating resource group.')
    az(['group', 'create', '-l', 'westus', '-n', resource_group],
       _out=sys.stdout,
       _err=sys.stdout)
    print_status('Creating VM.')
    az([
        'vm', 'create', '-g', resource_group, '-n', vm_name,
        '--generate-ssh-keys', '--authentication-type', 'ssh', '--image',
        'Canonical:UbuntuServer:16.04-LTS:latest', '--admin-username', 'ubuntu'
    ],
       _out=sys.stdout,
       _err=sys.stdout)
    io = StringIO()
    print_status('Getting VM IP address.')
    az([
        'vm', 'list-ip-addresses', '--resource-group', resource_group,
        '--name', vm_name, '--query',
        '[0].virtualMachine.network.publicIpAddresses[0].ipAddress'
    ],
       _out=io)
    ip_address = io.getvalue().strip().replace('"', '')
    print_status('VM IP address is {}'.format(ip_address))
    io.close()
    vm_connect_str = "ubuntu@{}".format(ip_address)
    my_vm = ssh.bake(['-oStrictHostKeyChecking=no', vm_connect_str])
    print_status('Installing Docker.')
    my_vm([
        'curl', '-sSL', 'https://get.docker.com/', '-o',
        'docker_install_script.sh'
    ],
          _out=sys.stdout,
          _err=sys.stdout)
    my_vm(['sh', 'docker_install_script.sh'], _out=sys.stdout, _err=sys.stdout)
    print_status('Docker installed.')
    io = StringIO()
    my_vm(['mktemp', '-d'], _out=io)
    repo_dir = io.getvalue().strip()
    io.close()
    print_status('Cloning repo.')
    my_vm([
        'git', 'clone', 'https://github.com/{}'.format(
            script_env.get('REPO_NAME')), repo_dir
    ],
          _out=sys.stdout,
          _err=sys.stdout)
    image_tag = '{}:{}'.format(script_env.get('DOCKER_REPO'),
                               script_env.get('CLI_VERSION'))
    path_to_dockerfile = os.path.join(repo_dir, 'packaged_releases', 'docker',
                                      'Dockerfile')
    path_to_docker_context = os.path.join(repo_dir, 'packaged_releases',
                                          'docker')
    print_status('Running Docker build.')
    my_vm([
        'sudo', 'docker', 'build', '--no-cache', '--build-arg',
        'BUILD_DATE="`date -u +"%Y-%m-%dT%H:%M:%SZ"`"', '--build-arg',
        'CLI_VERSION={}'.format(script_env.get('CLI_VERSION')), '--build-arg',
        'CLI_DOWNLOAD_SHA256={}'.format(script_env.get('CLI_DOWNLOAD_SHA256')),
        '-f', path_to_dockerfile, '-t', image_tag, path_to_docker_context
    ],
          _out=sys.stdout,
          _err=sys.stdout)
    print_status('Docker build complete.')
    print_status('Running Docker log in.')
    my_vm([
        'sudo', 'docker', 'login', '--username',
        script_env.get('DOCKER_USERNAME'), '--password', '"{}"'.format(
            script_env.get('DOCKER_PASSWORD'))
    ],
          _out=sys.stdout,
          _err=sys.stdout)
    print_status('Running Docker push.')
    my_vm(['sudo', 'docker', 'push', image_tag],
          _out=sys.stdout,
          _err=sys.stdout)
    print_status('Image pushed to Docker Hub.')
    print_status('Done. :)')
    give_chance_to_cancel('Delete resource group (in background)')
    az(['group', 'delete', '--name', resource_group, '--yes', '--no-wait'],
       _out=sys.stdout,
       _err=sys.stdout)
    print_status('Finished. :)')
Example no. 28
def main():
    print_env_vars()
    time_str = datetime.utcnow().strftime('%Y%m%d%H%M%S')
    az(["login"], _out=sys.stdout, _err=sys.stdout)
    resource_group = 'azurecli-release-debian-' + time_str
    vm_name = 'vm-debian-' + time_str
    print_status('Creating resource group.')
    az(['group', 'create', '-l', 'westus', '-n', resource_group], _out=sys.stdout, _err=sys.stdout)
    print_status('Creating VM.')
    az(['vm', 'create', '-g', resource_group, '-n', vm_name, '--generate-ssh-keys', '--authentication-type', 'ssh',
        '--image', 'Canonical:UbuntuServer:14.04.4-LTS:latest', '--admin-username', 'ubuntu'],
       _out=sys.stdout, _err=sys.stdout)
    io = StringIO()
    print_status('Getting VM IP address.')
    az(['vm', 'list-ip-addresses', '--resource-group', resource_group, '--name', vm_name,
        '--query', '[0].virtualMachine.network.publicIpAddresses[0].ipAddress'], _out=io)
    ip_address = io.getvalue().strip().replace('"', '')
    print_status('VM IP address is {}'.format(ip_address))
    io.close()
    vm_connect_str = "ubuntu@{}".format(ip_address)
    my_vm = ssh.bake(['-oStrictHostKeyChecking=no', vm_connect_str])
    print_status('Installing git.')
    my_vm(['sudo', 'apt-get', 'update', '&&', 'sudo', 'apt-get', 'install', '-y', 'git'],
          _out=sys.stdout, _err=sys.stdout)
    io = StringIO()
    my_vm(['mktemp', '-d'], _out=io)
    repo_dir = io.getvalue().strip()
    io.close()
    print_status('Cloning repo.')
    my_vm(['git', 'clone', 'https://github.com/{}'.format(script_env.get('REPO_NAME')), repo_dir], _out=sys.stdout, _err=sys.stdout)
    path_to_debian_build_script = os.path.join(repo_dir, 'packaged_releases', 'debian', 'debian_build.sh')
    path_to_dir_creator = os.path.join(repo_dir, 'packaged_releases', 'debian', 'debian_dir_creator.sh')
    io = StringIO()
    my_vm(['mktemp', '-d'], _out=io)
    build_artifact_dir = io.getvalue().strip()
    io.close()
    print_status('Running debian build scripts.')
    my_vm(['chmod', '+x', path_to_debian_build_script, path_to_dir_creator], _out=sys.stdout, _err=sys.stdout)
    my_vm(['export', 'CLI_VERSION={}'.format(script_env.get('CLI_VERSION')), '&&',
           'export', 'CLI_DOWNLOAD_SHA256={}'.format(script_env.get('CLI_DOWNLOAD_SHA256')), '&&',
           'export', 'BUILD_ARTIFACT_DIR={}'.format(build_artifact_dir), '&&',
           path_to_debian_build_script, path_to_dir_creator],
          _out=sys.stdout, _err=sys.stdout)
    print_status('Debian build complete.')
    io = StringIO()
    my_vm(['ls', build_artifact_dir], _out=io)
    artifact_name = io.getvalue().strip()
    io.close()
    deb_file_path = os.path.join(build_artifact_dir, artifact_name)
    print_status('Installing the .deb on the build machine')
    my_vm(['sudo', 'dpkg', '-i', deb_file_path], _out=sys.stdout, _err=sys.stdout)
    # Upload to Azure Storage
    print_status('Uploading .deb to Azure storage.')
    my_vm(['az', 'storage', 'container', 'create', '--name', 'repos', '--public-access', 'blob',
           '--connection-string', '"{}"'.format(script_env.get('AZURE_STORAGE_CONNECTION_STRING'))],
          _out=sys.stdout, _err=sys.stdout)
    my_vm(['az', 'storage', 'blob', 'upload', '-f', deb_file_path,
           '-n', artifact_name, '-c', 'repos', '--connection-string', '"{}"'.format(script_env.get('AZURE_STORAGE_CONNECTION_STRING'))],
          _out=sys.stdout, _err=sys.stdout)
    io = StringIO()
    my_vm(['az', 'storage', 'blob', 'url', '-n', artifact_name, '-c', 'repos', '--output', 'tsv',
           '--connection-string', '"{}"'.format(script_env.get('AZURE_STORAGE_CONNECTION_STRING'))], _out=io)
    deb_url = io.getvalue().strip()
    io.close()
    print_status('Debian file uploaded to the following URL.')
    print_status(deb_url)
    # Publish to apt service
    my_vm(['wget', '-q', 'https://bootstrap.pypa.io/get-pip.py'], _out=sys.stdout, _err=sys.stdout)
    my_vm(['sudo', 'python', 'get-pip.py'], _out=sys.stdout, _err=sys.stdout)
    my_vm(['sudo', 'pip', 'install', '--upgrade', 'requests'], _out=sys.stdout, _err=sys.stdout)
    upload_script = REPO_UPLOAD_SCRIPT_TMPL.format(cli_version=script_env.get('CLI_VERSION'),
                                                   repo_id=script_env.get('DEBIAN_REPO_ID'),
                                                   source_url=deb_url,
                                                   repo_package_url=script_env.get('DEBIAN_REPO_URL'),
                                                   repo_user=script_env.get('DEBIAN_REPO_USERNAME'),
                                                   repo_pass=script_env.get('DEBIAN_REPO_PASSWORD'))
    my_vm(['echo', '-e', '"{}"'.format(upload_script), '>>', 'repo_upload.py'], _out=sys.stdout, _err=sys.stdout)
    my_vm(['python', 'repo_upload.py'], _out=sys.stdout, _err=sys.stdout)
    print_status('Done. :)')
    give_chance_to_cancel('Delete resource group (in background)')
    az(['group', 'delete', '--name', resource_group, '--yes', '--no-wait'], _out=sys.stdout, _err=sys.stdout)
    print_status('Finished. :)')
Example no. 29
def main():
    print_env_vars()
    time_str = datetime.utcnow().strftime('%Y%m%d%H%M%S')
    az(["login"], _out=sys.stdout, _err=sys.stdout)
    resource_group = 'azurecli-release-rpm-' + time_str
    vm_name = 'vm-rpm-' + time_str
    print_status('Creating resource group.')
    az(['group', 'create', '-l', 'westus', '-n', resource_group],
       _out=sys.stdout,
       _err=sys.stdout)
    print_status('Creating VM.')
    az([
        'vm', 'create', '-g', resource_group, '-n', vm_name,
        '--generate-ssh-keys', '--authentication-type', 'ssh', '--image',
        'OpenLogic:CentOS:7.3:latest', '--admin-username', 'myuser'
    ],
       _out=sys.stdout,
       _err=sys.stdout)
    io = StringIO()
    print_status('Getting VM IP address.')
    az([
        'vm', 'list-ip-addresses', '--resource-group', resource_group,
        '--name', vm_name, '--query',
        '[0].virtualMachine.network.publicIpAddresses[0].ipAddress'
    ],
       _out=io)
    ip_address = io.getvalue().strip().replace('"', '')
    print_status('VM IP address is {}'.format(ip_address))
    io.close()
    vm_connect_str = "myuser@{}".format(ip_address)
    my_vm = ssh.bake(['-oStrictHostKeyChecking=no', vm_connect_str])
    print_status('Installing git.')
    build_prereqs = "sudo yum update -y && sudo yum install -y git gcc rpm-build rpm-devel rpmlint make python bash coreutils " \
                    "diffutils patch rpmdevtools python libffi-devel python-devel openssl-devel"
    my_vm(build_prereqs.split(), _out=sys.stdout, _err=sys.stdout)
    my_vm(
        "mkdir -p ~/rpmbuild/BUILD ~/rpmbuild/RPMS ~/rpmbuild/SOURCES ~/rpmbuild/SPECS ~/rpmbuild/SRPMS"
        .split(),
        _out=io)
    io = StringIO()
    my_vm(['mktemp', '-d'], _out=io)
    repo_dir = io.getvalue().strip()
    io.close()
    print_status('Cloning repo.')
    my_vm([
        'git', 'clone', 'https://github.com/{}'.format(
            script_env.get('REPO_NAME')), repo_dir
    ],
          _out=sys.stdout,
          _err=sys.stdout)
    path_to_spec_file = os.path.join(repo_dir, 'packaged_releases', 'rpm',
                                     'azure-cli.spec')
    print_status('Running build script.')
    my_vm([
        'export', 'CLI_VERSION={}'.format(script_env.get('CLI_VERSION')), '&&',
        'export', 'CLI_DOWNLOAD_SHA256={}'.format(
            script_env.get('CLI_DOWNLOAD_SHA256')), '&&', 'rpmbuild', '-v',
        '-bb', '--clean', path_to_spec_file
    ],
          _out=sys.stdout,
          _err=sys.stdout)
    print_status('Build complete.')
    io = StringIO()
    my_vm(['ls', '~/rpmbuild/RPMS/*/*'], _out=io)
    rpm_file_path = io.getvalue().strip()
    io.close()
    artifact_name = rpm_file_path.split('/')[-1]
    print_status('Installing the .rpm on the build machine')
    my_vm(['sudo', 'rpm', '-i', rpm_file_path],
          _out=sys.stdout,
          _err=sys.stdout)
    # Upload to Azure Storage
    print_status('Uploading .rpm to Azure storage.')
    my_vm([
        'az', 'storage', 'container', 'create', '--name', 'rpms',
        '--public-access', 'blob', '--connection-string', '"{}"'.format(
            script_env.get('AZURE_STORAGE_CONNECTION_STRING'))
    ],
          _out=sys.stdout,
          _err=sys.stdout)
    my_vm([
        'az', 'storage', 'blob', 'upload', '-f', rpm_file_path, '-n',
        artifact_name, '-c', 'rpms', '--connection-string', '"{}"'.format(
            script_env.get('AZURE_STORAGE_CONNECTION_STRING'))
    ],
          _out=sys.stdout,
          _err=sys.stdout)
    io = StringIO()
    my_vm([
        'az', 'storage', 'blob', 'url', '-n', artifact_name, '-c', 'rpms',
        '--output', 'tsv', '--connection-string', '"{}"'.format(
            script_env.get('AZURE_STORAGE_CONNECTION_STRING'))
    ],
          _out=io)
    rpm_url = io.getvalue().strip()
    io.close()
    print_status('RPM file uploaded to the following URL.')
    print_status(rpm_url)
    # Publish to service
    my_vm(['wget', '-q', 'https://bootstrap.pypa.io/get-pip.py'],
          _out=sys.stdout,
          _err=sys.stdout)
    my_vm(['sudo', 'python', 'get-pip.py'], _out=sys.stdout, _err=sys.stdout)
    my_vm(['sudo', 'pip', 'install', '--upgrade', 'requests'],
          _out=sys.stdout,
          _err=sys.stdout)
    upload_script = REPO_UPLOAD_SCRIPT_TMPL.format(
        cli_version=script_env.get('CLI_VERSION'),
        repo_id=script_env.get('YUM_REPO_ID'),
        source_url=rpm_url,
        repo_package_url=script_env.get('MS_REPO_URL'),
        repo_user=script_env.get('MS_REPO_USERNAME'),
        repo_pass=script_env.get('MS_REPO_PASSWORD'))
    my_vm(['echo', '-e', '"{}"'.format(upload_script), '>>', 'repo_upload.py'],
          _out=sys.stdout,
          _err=sys.stdout)
    # Keeping this code commented for when we can automate the signing of RPM packages.
    # my_vm(['python', 'repo_upload.py'], _out=sys.stdout, _err=sys.stdout)
    print_status(
        'PRINTING OUT REPO UPLOAD SCRIPT AS THE UNSIGNED RPM NEEDS TO BE FIRST SIGNED BEFORE UPLOADING...'
    )
    my_vm(['cat', 'repo_upload.py'], _out=sys.stdout, _err=sys.stdout)
    print_status('Done. :)')
    give_chance_to_cancel('Delete resource group (in background)')
    az(['group', 'delete', '--name', resource_group, '--yes', '--no-wait'],
       _out=sys.stdout,
       _err=sys.stdout)
    print_status('Finished. :)')