Example #1
def run(args):
    '''Main script, run after the CLI arguments have been parsed.'''
    import logging

    from deployer import Deployer

    # This script expects that the following environment vars are set:
    #
    # AZURE_TENANT_ID: with your Azure Active Directory tenant id or domain
    # AZURE_CLIENT_ID: with your Azure Active Directory Application Client ID
    # AZURE_CLIENT_SECRET: with your Azure Active Directory Application Secret


    msg = "\nInitializing the Deployer class with subscription id: {}, resource group: {}" \
          "\nand public key located at: {}...\n\n"
    msg = msg.format(args.my_subscription_id, args.my_resource_group,
                     args.my_pub_ssh_key_path)
    logging.info(msg)

    # Initialize the deployer class
    deploy = Deployer(subscription_id=args.my_subscription_id,
                      location=args.location)
    ##

    logging.info("Beginning the deployment... \n\n")
    # Deploy the template
    # DNS label must match ^[a-z][a-z0-9-]{1,61}[a-z0-9]$
    args.dns_label_prefix = args.vmName.lower()

    deploy.deploy(vars(args))

    logging.warning(
        "Done deploying!!\n\nYou can connect via: `ssh {}@{}.australiaeast.cloudapp.azure.com`"
        .format(args.adminUserName, args.dns_label_prefix))
    logging.debug(str(deploy))
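The comment block at the top of run() lists the AZURE_* environment variables the script expects but never shows them being consumed. As a rough illustration only, assuming the azure-identity package (the helper name and the use of ClientSecretCredential are assumptions, not taken from this source), a credential could be built from those variables like this:

import os

from azure.identity import ClientSecretCredential  # assumed dependency, not shown in the source

def credential_from_env():
    # Build a service-principal credential from the documented variables (illustrative helper).
    return ClientSecretCredential(
        tenant_id=os.environ["AZURE_TENANT_ID"],
        client_id=os.environ["AZURE_CLIENT_ID"],
        client_secret=os.environ["AZURE_CLIENT_SECRET"],
    )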
Example #2
def run_example(config):
    try:
        my_subscription_id = config['subscriptionId']  # your Azure Subscription Id
        my_resource_group = 'azure-python-deployment-sample'  # the resource group for deployment
        my_pub_ssh_key_path = os.path.expanduser('~/.ssh/id_rsa.pub')  # the path to your rsa public key file

        msg = "\nInitializing the Deployer class with subscription id: {}, resource group: {}" \
            "\nand public key located at: {}...\n\n"
        msg = msg.format(my_subscription_id, my_resource_group,
                         my_pub_ssh_key_path)
        print(msg)

        # Initialize the deployer class
        deployer = Deployer(config, my_resource_group, my_pub_ssh_key_path)

        print("Beginning the deployment... \n\n")
        # Deploy the template
        my_deployment = deployer.deploy()

        print("Done deploying!!")

    finally:
        print("Cleaning up the deployment... \n\n")
        # Destroy the resource group which contains the deployment
        deployer.destroy()
        print("Cleaned up the deployment successfully.\n")
Example #3
 def __init__(self, problem, sample_time=0.01, update_time=0.1):
     self.deployer = Deployer(problem, sample_time, update_time)
     self.update_time = update_time
     self.sample_time = sample_time
     self.problem = problem
     PlotLayer.simulator = self
     self.reset_timing()
Example #4
 def upload_theme(self, reason: str):
     d = Deployer(self.sub)
     stylesheet = open(self.args.repo +
                       "/dist/assets/css/screen.css").read()
     images = []
     for suffix in ('png', 'PNG', 'jpg', 'JPG', 'jpeg', 'JPEG'):
         pattern = self.args.repo + "/dist/assets/*/*" + suffix
         images += [(Path(p).stem, p) for p in glob(pattern)]
     d.put(stylesheet, images, reason)
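The loop above issues one glob per upper/lower-case suffix variant. An equivalent single-pass sketch that filters on the lower-cased extension (repo here is assumed to hold the same path as self.args.repo):

from glob import glob
from pathlib import Path

image_exts = {".png", ".jpg", ".jpeg"}
images = [(Path(p).stem, p)
          for p in glob(repo + "/dist/assets/*/*")
          if Path(p).suffix.lower() in image_exts]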
Example #5
File: main.py  Project: jar349/deployer
async def handle(request: Command):
    namespace = request.namespace
    command = request.command
    event = request.event
    print(
        f"Received namespace: {namespace}, command: {command}, event: {event}")

    if not command:
        return {
            "message":
            f":red_circle: No command provided.  Try `.{namespace} help`"
        }

    cmd_parts = command.split()
    # slack sends links surrounded by angle brackets (<, >) if it recognizes a URL, so we need to extract the URL
    substring = SlackFormattedSubstring(cmd_parts[0])
    handler_url = (substring.get_content_or_none()
                   if substring.is_url_link() else substring.get_raw())

    if not validators.url(handler_url):
        return {
            "message":
            (f":red_circle: `{handler_url}` does not seem to be a valid URL; see: "
             +
             "https://validators.readthedocs.io/en/latest/index.html#module-validators.url"
             )
        }

    deployer = Deployer(handler_url)

    if len(cmd_parts) > 1:
        if cmd_parts[1] == "to":
            if len(cmd_parts) == 3:
                deployer.set_environment(cmd_parts[2])
            else:
                return {
                    "message":
                    f":red_circle: I don't understand that command; try: `.{namespace} help`"
                }
        else:
            return {
                "message":
                f":red_circle: I don't understand that command; try: `.{namespace} help`"
            }
    elif len(cmd_parts) > 3:
        return {
            "message":
            f":red_circle: I don't understand that command; try: `.{namespace} help`"
        }

    return {"message": deployer.deploy()}
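Two details of the parsing above are easy to miss: Slack wraps recognized URLs in angle brackets, which is why the first token goes through SlackFormattedSubstring, and the trailing elif len(cmd_parts) > 3 branch is unreachable because any list longer than 3 elements already took the len(cmd_parts) > 1 branch. A small standalone sketch of the intended grammar `.{namespace} <handler-url> [to <environment>]` (the helper name and return shape are illustrative, not from the source):

def parse_deploy_command(cmd_parts):
    # Return (handler_url, environment_or_None) or raise ValueError on a malformed command.
    if len(cmd_parts) == 1:
        return cmd_parts[0], None
    if len(cmd_parts) == 3 and cmd_parts[1] == "to":
        return cmd_parts[0], cmd_parts[2]
    raise ValueError("expected: <handler-url> [to <environment>]")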
Example #6
def main():
  config = get_config()
  name = config.name
  img_name = "deploy_data/" + name + ".tif"
  data = skimage.io.imread(img_name)

  print(data.shape)
 
  data = np.expand_dims(data, axis=3)
  data = (data / 255.).astype(np.float32)
  
  print(data.shape)

  with tf.Session() as sess:
    deployer = Deployer(sess, data, config)
    deployer.deploy()
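The two lines between the prints turn an (N, H, W) grayscale stack into the (N, H, W, 1), float32, [0, 1]-scaled layout that most TensorFlow models expect. A quick self-contained check of those shapes on synthetic data:

import numpy as np

data = np.zeros((3, 64, 64), dtype=np.uint8)   # pretend stack of 3 grayscale tiles
data = np.expand_dims(data, axis=3)            # -> (3, 64, 64, 1)
data = (data / 255.).astype(np.float32)        # scale to [0, 1]
print(data.shape)                              # (3, 64, 64, 1)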
Example #7
def deploy():
    deployer = Deployer()
    print("\nDeploying contract...")
    (contract, tx_hash) = deployer.deploy(
        compiled_path,
        contract_name,
        {
            "from": sender_account,
            "value": 0,
            "nonce": 1642,
            "gas": gas,
            "gasPrice": gas_price
        },
    )
    print("\nDeployment transaction hash: ", tx_hash.hex(),
          "\nCrowdsale address: ", contract.address)
    write_to_address_log(contract)
    return contract
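The Deployer used above is not shown, but with a recent web3.py (snake_case API names are an assumption here) a deployment that returns a contract and its transaction hash typically boils down to something like this sketch; abi and bytecode would come from the solc output referenced by compiled_path:

from web3 import Web3

w3 = Web3(Web3.HTTPProvider("http://127.0.0.1:8545"))     # illustrative endpoint
factory = w3.eth.contract(abi=abi, bytecode=bytecode)     # abi/bytecode from compilation
tx_hash = factory.constructor().transact({
    "from": sender_account,
    "gas": gas,
    "gasPrice": gas_price,
})
receipt = w3.eth.wait_for_transaction_receipt(tx_hash)
contract = w3.eth.contract(address=receipt.contractAddress, abi=abi)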
Example #8
File: provazure.py  Project: toto23/test
# Standard_DS3_v2_Promo,Standard_DS4_v2_Promo,Standard_DS5_v2_Promo,Standard_DS11_v2_Promo,Standard_DS12_v2_Promo,Standard_DS13_v2_Promo,Standard_DS14_v2_Promo,Standard_F1s,Standard_F2s
# Standard_F4s,Standard_F8s,Standard_F16s,Standard_D2_v3,Standard_D4_v3,Standard_D8_v3,Standard_D16_v3,Standard_D32_v3,Standard_D2s_v3,Standard_D4s_v3,Standard_D8s_v3,Standard_D16s_v3
# Standard_D32s_v3,Standard_NV6,Standard_NV12,Standard_NV24,Standard_D64_v3,Standard_D64s_v3,Standard_E2_v3,Standard_E4_v3,Standard_E8_v3,Standard_E16_v3,Standard_E32_v3,Standard_E64_v3
# Standard_E2s_v3,Standard_E4s_v3,Standard_E8s_v3,Standard_E16s_v3,Standard_E32s_v3,Standard_E64s_v3,Standard_F2s_v2,Standard_F4s_v2,Standard_F8s_v2,Standard_F16s_v2,Standard_F32s_v2
# Standard_F64s_v2,Standard_F72s_v2,Standard_NC6,Standard_NC12,Standard_NC24,Standard_NC24r,Standard_H8,Standard_H16,Standard_H8m,Standard_H16m,Standard_H16r,Standard_H16mr,Standard_G1
# Standard_G2,Standard_G3,Standard_G4,Standard_G5,Standard_GS1,Standard_GS2,Standard_GS3,Standard_GS4,Standard_GS4-4,Standard_GS4-8,Standard_GS5,Standard_GS5-8,Standard_GS5-16
# Standard_L4s,Standard_L8s,Standard_L16s,Standard_L32s,Standard_M64-16ms,Standard_M64-32ms,Standard_M64ms,Standard_M64s,Standard_M128-32ms,Standard_M128-64ms,Standard_M128ms
# Standard_M128s,Standard_ND6s,Standard_ND12s,Standard_ND24rs,Standard_ND24s,Standard_E32-8s_v3,Standard_E32-16s_v3,Standard_E64-16s_v3,Standard_E64-32s_v3,Standard_NC6s_v2
# Standard_NC12s_v2,Standard_NC24rs_v2,Standard_NC24s_v2,Standard_A8,Standard_A9,Standard_A10,Standard_A11

user = ""
passwd = ""
my_subscription_id = ""
my_resource_group = ''

# Initialize the deployer class
# deployer = Deployer(my_subscription_id, my_resource_group, my_pub_ssh_key_path)
print("Deployment set with " + my_subscription_id + " and " +
      my_resource_group)
deployer = Deployer(user, passwd, my_subscription_id, my_resource_group)

# Deploy the template
print("Beginning the deployment... \n\n")
my_deployment = deployer.deploy()

print(
    "Done deploying!!\n\nYou can connect via: `ssh azureSample@{}.westeurope.cloudapp.azure.com`"
    .format(deployer.dns_label_prefix))

# Destroy the resource group which contains the deployment
# deployer.destroy()
Example #9
#!/usr/bin/env python3

import sys

sys.path.append("../deployment")
from deployer import Deployer
from web3_interface import Web3Interface
from tx_checker import fails, succeeds
from test_config import config_f
import random

web3 = Web3Interface().w3
eth = web3.eth
web3.miner.start(1)
deployer = Deployer()

gas = 5000000
gas_price = 20000000000

owner = eth.accounts[0]
release_agent = eth.accounts[1]
non_owner = eth.accounts[2]
transfer_agent = eth.accounts[3]
non_transfer_agent = eth.accounts[4]
non_release_agent = eth.accounts[5]

contribution_range = (1, 2**256)


def random_contribution():
    return 0
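contribution_range is declared above, but the stub returns 0 and the rest of the test script is not included in this listing. If a value were actually drawn from that range (treating the tuple as a half-open interval is an assumption), it could look like:

import random

def random_contribution():
    low, high = contribution_range           # (1, 2**256)
    return random.randint(low, high - 1)     # randint is inclusive on both ends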
Example #10
import os.path
from deployer import Deployer
# This script expects that the following environment vars are set:
#
# AZURE_TENANT_ID: with your Azure Active Directory tenant id or domain
# AZURE_CLIENT_ID: with your Azure Active Directory Application Client ID
# AZURE_CLIENT_SECRET: with your Azure Active Directory Application Secret
# AZURE_SUBSCRIPTION_ID: with your target subscription ID

my_subscription_id = os.environ.get(
    'AZURE_SUBSCRIPTION_ID',
    '11111111-1111-1111-1111-111111111111')  # your Azure Subscription Id
msg = "\nInitializing the Deployer class with subscription id: {}\n\n"
msg = msg.format(my_subscription_id)
print(msg)

# Initialize the deployer class
deployer = Deployer(my_subscription_id)

print("Beginning the deployment... \n\n")
# Deploy the template
#deployer.createResourceGroup("MyResourceGroup")
deployer.resource_group = "MyResourceGroup"
my_deployment = deployer.deployAtResourceGroupScope("vnets.json")
my_deployment = deployer.deployAtSubscriptionScope('resourcegroups.json')

print("Done deploying!!")
Example #11
    def start_experiment(self,
            results_dir,
            update_and_install=False,
            experiment_runtime=5,        # minutes
            runtime_buffer=1,            # minutes
            stats_frequency="30s",
            num_threads_in_pool=5,
            replication_factor=1,
            memaslap_workload="smallvalue.cfg",
            memaslap_window_size="10k",
            hibernate_at_end=True,
            ssh_username="******",
            num_memaslaps=1,
            num_memcacheds=1,
            concurrency=64,
            is_first_run=True
    ):
        experiment_runtime_string = "{}m".format(experiment_runtime)
    
        with fabric.api.settings(warn_only=True):
            fabric.api.local("rm -r {}/*".format(results_dir))
            fabric.api.local("mkdir -p {}".format(results_dir))
            fabric.api.local("mkdir {}/graphs".format(results_dir))
    
        # region ---- Parameters ----
        TOTAL_MACHINE_COUNT = 11  # this is fixed by the template
        resource_group_name = 'template11vms'
        my_pub_ssh_key_path = '~/.ssh/id_rsa_asl.pub'
        template_path = "azure-templates/template11vms.json"

        pub_ssh_key_path = os.path.expanduser(my_pub_ssh_key_path)
        with open(pub_ssh_key_path, 'r') as pub_ssh_file_fd:
            pub_ssh_key = pub_ssh_file_fd.read().strip()
    
        parameters = {
            "virtualMachines_name": "foraslvms",
            "virtualMachines_adminPassword": "******",
            "networkInterfaces_name": "MyNetworkInterface",
            "virtualNetworks_testeth_vnet_name": "MyVNet",
            "key": pub_ssh_key,
            "uniquedns": "pungast"
        }
        # endregion
    
        # Initialize the deployer class
        if is_first_run:
            self.deployer = Deployer(resource_group_name, template_path, parameters)
            self.deployer.deploy_wait()
    
        # region ---- Extract VMs' IPs and other information ----
        vms = self.deployer.compute_client.virtual_machines.list(resource_group_name)
        vm_names = []
        vm_types = []
        public_hostnames = []
        private_hostnames = []
    
        for vm in vms:
            vm_type = vm.hardware_profile.vm_size
            vm_types.append(vm_type)
            vm_names.append(vm.name)
            self.log.info("VM {} [{}]".format(Colors.ok_blue(vm.name), vm_type))
    
            # Get machine's public address that we can use for SSH-ing
            public_ip = self.deployer.network_client.public_ip_addresses.get(resource_group_name, vm.name)
            public_host_address = public_ip.dns_settings.fqdn
            public_hostnames.append(public_host_address)
            #self.log.info("Public host name: {}".format(Colors.ok_green(public_host_address)))
    
            # Get machine's private IP address
            network_interface_id = vm.network_profile.network_interfaces[0].id
            network_interface_name = network_interface_id.split("/")[-1]
            network_interface = self.deployer.network_client.network_interfaces.get(resource_group_name, network_interface_name)
            private_host_address = network_interface.ip_configurations[0].private_ip_address
            private_hostnames.append(private_host_address)
            #self.log.info("Private host name: {}".format(Colors.ok_green(private_host_address)))
    
        # endregion

        # region ---- Set up all machines ----
        index_a4 = vm_types.index("Basic_A4")
        indices_smallmachines = list(range(TOTAL_MACHINE_COUNT))
        indices_smallmachines.remove(index_a4)
        memcached_machines = [vm_names.index("foraslvms" + str(x)) for x in Experiment.default_memcached_machines()]
        memcached_machines = memcached_machines[0:num_memcacheds]
        memaslap_machines = [vm_names.index("foraslvms" + str(x)) for x in Experiment.default_memaslap_machines()]
        memaslap_machines = memaslap_machines[0:num_memaslaps]
    
        self.log.info("A4 machine: " + str(index_a4))
        self.log.info("A2 machines: " + str(indices_smallmachines))
        self.log.info("Memcached machines: " + str(memcached_machines))
        self.log.info("Memaslap machines: " + str(memaslap_machines))

        # Wait for all servers to be responsive
        if is_first_run:
            aslutil.wait_for_servers(ssh_username, public_hostnames, "~/.ssh/id_rsa_asl", self.log, check_every_n_sec=10)

        # Set up memcached servers
        memcached_port = 11211
        mc_servers = []
        mc_server_string_list = []
        for i in memcached_machines:
            self.log.info("Setting up memcached on machine {} ({}).".format(i, vm_names[i]))
            mc_server = Memcached(memcached_port, public_hostnames[i], ssh_username=ssh_username,
                                  id_number=int(aslutil.server_name_to_number(vm_names[i])))
            mc_servers.append(mc_server)
            mc_server_string_list.append("{}:{}".format(private_hostnames[i], memcached_port))
            if update_and_install:
                mc_server.update_and_install()
        for s in mc_servers:
            s.start()

        sleep_for = 15
        self.log.info("Sleeping for {} seconds so memcached servers can start...".format(sleep_for))
        time.sleep(sleep_for)
    
        # Set up middleware server
        middleware_port = 11212
        self.log.info("Setting up middleware on machine {} ({}).".format(index_a4, vm_names[index_a4]))
        mw_server = Middleware(public_hostnames[index_a4], private_hostnames[index_a4], middleware_port,
                               num_threads_in_pool, replication_factor, mc_server_string_list, ssh_username=ssh_username)

        if update_and_install:
            mw_server.update_and_install()
        if is_first_run:
            mw_server.upload_jar()

        mw_server.clear_logs()
        mw_server.start()
    
        # Sleep a bit so middleware has time to start
        while not mw_server.is_running():
            sleep_for = 5
            self.log.info("Sleeping for {} seconds so middleware can start...".format(sleep_for))
            time.sleep(sleep_for)
        time.sleep(10)
    
        # Set up memaslap servers
        ms_servers = []
        first_memaslap = True
        for i in memaslap_machines:
            self.log.info("Setting up memaslap on machine {} ({}).".format(i, vm_names[i]))
            ms_server = Memaslap(public_hostnames[i], private_hostnames[index_a4], middleware_port, ssh_username=ssh_username,
                                 id_number=int(aslutil.server_name_to_number(vm_names[i]))) # i is zero-indexed
            ms_servers.append(ms_server)

            if is_first_run:
                ms_server.upload_resources()

            if update_and_install:
                if not first_memaslap:
                    ms_server.upload_built_files()
    
                ms_server.update_and_install()
    
                if first_memaslap:
                    ms_server.download_built_files()
                    first_memaslap = False
    
        for s in ms_servers:
            s.clear_logs()
            s.start(runtime=experiment_runtime_string, log_filename="memaslap{}.out".format(s.id_number),
                    stats_freq=stats_frequency, workload_filename=memaslap_workload, concurrency=concurrency,
                    window_size=memaslap_window_size)

        # endregion
    
        sleep_time = experiment_runtime + runtime_buffer
        self.log.info("Waiting for the experiment to finish, sleeping for up to {} minutes.".format(sleep_time))
        already_slept = 0
        while True:
            sleep_interval = 30
            time.sleep(sleep_interval)
            already_slept += sleep_interval

            num_running_memaslaps = sum([s.is_running() for s in ms_servers])

            self.log.info("Waiting for the experiment to finish (total {} minutes), {:.0f}/{} minutes elapsed ({:.0f}%), {} memaslaps running."
                     .format(sleep_time,
                             already_slept / 60, experiment_runtime,
                             100 * already_slept / 60.0 / experiment_runtime,
                             num_running_memaslaps))
            if already_slept >= sleep_time * 60:
                self.log.info("Stopping because of time limit.")
                break
            if num_running_memaslaps == 0:
                self.log.info("Stopping because no memaslaps are left.")
                break
    
        # region ---- Kill everyone ----
        # Memaslap
        for ms_server in ms_servers:
            ms_server.stop()
    
        # Middleware
        mw_server.stop()
    
        # Memcached
        for mc_server in mc_servers:
            mc_server.stop()
    
        # endregion
    
        # region ---- Download logs, extract data, plot ----
        mw_server.download_logs(local_path=results_dir)
        for ms_server in ms_servers:
            ms_server.download_logs(local_path=results_dir)
    
        # endregion
    
        if hibernate_at_end:
            self.deployer.hibernate_wait()
    
        self.log.info("Done.")
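The wait loop near the end of start_experiment (sleep in 30-second slices, stop on the time limit or once no memaslap is still running) is a general poll-until pattern. A minimal standalone helper in that spirit, with names that are illustrative rather than from the source:

import time

def wait_until(stop_condition, timeout_s, poll_s=30, on_poll=None):
    # Poll stop_condition() every poll_s seconds until it is true or timeout_s elapses.
    elapsed = 0
    while elapsed < timeout_s:
        time.sleep(poll_s)
        elapsed += poll_s
        if on_poll:
            on_poll(elapsed)
        if stop_condition():
            return True
    return False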
Example #12
pub_ssh_key_path = os.path.expanduser(my_pub_ssh_key_path)
with open(pub_ssh_key_path, 'r') as pub_ssh_file_fd:
    pub_ssh_key = pub_ssh_file_fd.read().strip()

parameters = {
    "virtualMachines_name": "foraslvms",
    "virtualMachines_adminPassword": "******",
    "networkInterfaces_name": "MyNetworkInterface",
    "virtualNetworks_testeth_vnet_name": virtual_network_name,
    "key": pub_ssh_key,
    "uniquedns": dns_label_prefix
}
# endregion

# Initialize the deployer class
deployer = Deployer(resource_group_name, template_path, parameters)
deployer.deploy_wait()

# region ---- Extract VMs' IPs ----
vms = deployer.compute_client.virtual_machines.list(resource_group_name)
vm_names = []
public_hostnames = []
private_hostnames = []

for vm in vms:
    log.info("VM {}".format(Colors.ok_blue(vm.name)))
    # Get machine's public address that we can use for SSH-ing
    public_ip = deployer.network_client.public_ip_addresses.get(resource_group_name, vm.name)
    public_host_address = public_ip.dns_settings.fqdn
    public_hostnames.append(public_host_address)
    log.info("Public host name: {}".format(Colors.ok_green(public_host_address)))
Example #13
from sys import argv

from deployer import Deployer
import json
import helm

if len(argv) != 3:
    print('USAGE: (install|delete) file.json')
    exit(1)

if argv[1] == "install":
    delete_mode = False
elif argv[1] == "delete":
    delete_mode = True
else:
    print('USAGE: (install|delete) file.json')
    exit(1)

with open(argv[2], 'r') as stream:
    doc = json.loads(stream.read())

if 'repositories' in doc:
    for name, url in doc['repositories'].items():
        helm.repo_add(name, url)

helm.repo_update()

with Deployer() as deployer:
    if delete_mode:
        deployer.delete(doc['charts'], doc['env'])
    else:
        deployer.install(doc['charts'], doc['env'])
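The driver above expects file.json to carry repositories, charts, and env keys; the exact per-chart schema is not shown, so the input below is only an assumed shape for illustration:

import json

doc = {
    "repositories": {"stable": "https://charts.helm.sh/stable"},   # name -> repo URL
    "env": "staging",
    "charts": [
        # assumed shape: whatever deployer.install()/delete() expects per chart
        {"name": "my-service", "chart": "stable/nginx", "namespace": "web"},
    ],
}
with open("file.json", "w") as fh:
    json.dump(doc, fh, indent=2)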
Example #14
import os.path
from deployer import Deployer


# This script expects that the following environment vars are set:
#
# AZURE_TENANT_ID: with your Azure Active Directory tenant id or domain
# AZURE_CLIENT_ID: with your Azure Active Directory Application Client ID
# AZURE_CLIENT_SECRET: with your Azure Active Directory Application Secret

my_subscription_id = os.environ.get(
    'AZURE_SUBSCRIPTION_ID', '00912682-1091-4918-8627-598dabcc00e1')   # your Azure Subscription Id
# the resource group for deployment
my_resource_group = 'azure-aks'

msg = "\nInitializing the Deployer class with subscription id: {}, resource group: {}"
msg = msg.format(my_subscription_id, my_resource_group)
print(msg)

# Initialize the deployer class
deployer = Deployer(my_subscription_id, my_resource_group)

print("Beginning the deployment... \n\n")
# Deploy the template
my_deployment = deployer.deploy()

print("Done deploying!!")

# Destroy the resource group which contains the deployment
# deployer.destroy()
Example #15
# Create a random password with Haikunator
passwd = funny_generator.haikunate()
# Fix haikunate password for Azure-friendly
passwd_list = list(passwd)
my_user_password = ''.join([passwd_list[i].upper() if i == 0 else \
    passwd_list[i] for i in range(len(passwd_list))]).replace('-', '_')
print('Take note of the admin password: '******'/home/usersinfo.csv', download_path='.')

# TODO: create a database of users and passwords
Example #16
# AZURE_SUBSCRIPTION_ID: your subscription id
# AZURE_RESOURCE_LOCATION: with your azure stack resource location
# this example assumes your ssh public key present here: ~/id_rsa.pub

my_subscription_id = os.environ.get(
    'AZURE_SUBSCRIPTION_ID')  # your Azure Subscription Id
my_resource_group = 'azure-python-deployment-sample'  # the resource group for deployment
my_pub_ssh_key_path = os.path.expanduser(
    '~/id_rsa.pub')  # the path to your rsa public key file

# Set Azure stack supported API profile as the default profile
KnownProfiles.default.use(KnownProfiles.v2018_03_01_hybrid)

msg = "\nInitializing the Deployer class with subscription id: {}, resource group: {}" \
    "\nand public key located at: {}...\n\n"
msg = msg.format(my_subscription_id, my_resource_group, my_pub_ssh_key_path)
print(msg)

# Initialize the deployer class
deployer = Deployer(my_subscription_id, my_resource_group, my_pub_ssh_key_path)

print("Beginning the deployment... \n\n")
# Deploy the template
my_deployment = deployer.deploy()

print(
    "Done deploying!!\n\nYou can connect via: `ssh azureSample@{}.local.cloudapp.azurestack.external`"
    .format(deployer.dns_label_prefix))

# Destroy the resource group which contains the deployment
# deployer.destroy()
Example #17
    elif len(args_list) == 1:
        dp.test_server()
        dp.run_command(args_list[0])
    else:
        server_list = deal_select_server(args_list[1:])
        dp.run_command(args_list[0], server_list)

if __name__ == '__main__':
    if '-h' in sys.argv or '--help' in sys.argv:
        show_help()
        sys.exit(0)

    print(color_str(Color.YELLOW, "loading profile...\n"))

    conf_path = os.environ['HOME'] + "/.ssh/auto_password.conf"
    dp = Deployer(conf_path)

    print('read profile {}\n'.format(color_str(Color.GREEN, "success")))

    opts = sys.argv[1:]

    if not opts:
        loop_manage_server()
    elif opts[0] == 'push':
        push_file_controller(opts[1:])
    elif opts[0] == 'pull':
        pull_file_controller(opts[1:])
    elif opts[0] == 'run':
        run_command_controller(opts[1:])
    elif opts[0] == 'info':
        dp.test_server()
Example #18
def build(config: dict, args: argparse.Namespace) -> None:
    model = args.model
    group = args.group
    pipeline = args.pipeline

    if group:
        models = config['model_groups'].get(group)
        if not models:
            print(f'Group {group} does not exist or is empty')
            return
    elif model:
        models = [model]
    else:
        print('Please, specify group or model full name')
        return

    absent_models = set(models) - set(config['models'].keys())
    if len(absent_models) > 0:
        absent_models = ', '.join(absent_models)
        print(f'Unknown model full names: {absent_models}')
        return

    if pipeline and pipeline not in preset_pipelines.keys():
        print(f'Unknown pipeline name: {pipeline}')
        return
    elif pipeline:
        for model in models:
            config['models'][model]['pipeline'] = pipeline
    else:
        absent_pipeline_models = []
        for model in models:
            if config['models'][model].get(
                    'pipeline') not in preset_pipelines.keys():
                absent_pipeline_models.append(model)
        if absent_pipeline_models:
            absent_pipeline_models = ', '.join(absent_pipeline_models)
            print(
                f'Incorrect or absent pipeline names for: {absent_pipeline_models}'
            )
            return

    # Test Docker Hub authentication
    dockerhub_password = args.dockerhub_pass

    if not dockerhub_password:
        prompt_text = 'Docker Hub password was not entered, would you like to proceed without Docker Hub login?'
        if not prompt_confirmation(prompt_text):
            return
    else:
        try:
            client: DockerClient = DockerClient(
                base_url=config['docker_base_url'])
            client.login(config['dockerhub_registry'], dockerhub_password)
        except APIError as e:
            print(e)
            prompt_text = 'Docker Hub login error occurred, would you like to proceed without Docker Hub login?'
            if not prompt_confirmation(prompt_text):
                return

    config['dockerhub_password'] = dockerhub_password

    deployer = Deployer(config)
    deployer.deploy(models)
Example #19
                        nargs='?',
                        help='Local branch name',
                        default='master')

    parser.add_argument('-rb',
                        '--remote-branch',
                        nargs='?',
                        help='Remote branch name',
                        default='origin')

    parser.add_argument('-w',
                        '--webhook-path',
                        nargs='?',
                        help='Path for Git webhook',
                        default='/git/<reponame>/webhook')

    parser.add_argument('-p',
                        '--webhook-port',
                        type=int,
                        nargs='?',
                        help='Port for Git webhook',
                        default=9050)

    return parser.parse_args()


if __name__ == '__main__':
    args = parse_arguments()
    init()  # colorama
    deployer = Deployer(args)
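This last listing starts partway through the argument definitions; a self-contained parse_arguments() in the same style, with the truncated leading option filled in only as an assumption (the -lb/--local-branch flag names are guesses based on its visible help text and default), might look like:

import argparse

def parse_arguments():
    parser = argparse.ArgumentParser(description='Git webhook deployer')   # description assumed

    parser.add_argument('-lb', '--local-branch', nargs='?',
                        help='Local branch name', default='master')
    parser.add_argument('-rb', '--remote-branch', nargs='?',
                        help='Remote branch name', default='origin')
    parser.add_argument('-w', '--webhook-path', nargs='?',
                        help='Path for Git webhook', default='/git/<reponame>/webhook')
    parser.add_argument('-p', '--webhook-port', type=int, nargs='?',
                        help='Port for Git webhook', default=9050)

    return parser.parse_args()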