def run(args):
    """Main script, run after argument parsing."""
    from deployer import Deployer

    # This script expects that the following environment vars are set:
    #
    # AZURE_TENANT_ID: with your Azure Active Directory tenant id or domain
    # AZURE_CLIENT_ID: with your Azure Active Directory Application Client ID
    # AZURE_CLIENT_SECRET: with your Azure Active Directory Application Secret
    msg = "\nInitializing the Deployer class with subscription id: {}, resource group: {}" \
          "\nand public key located at: {}...\n\n"
    msg = msg.format(args.my_subscription_id, args.my_resource_group,
                     args.my_pub_ssh_key_path)
    logging.info(msg)

    # Initialize the deployer class
    deploy = Deployer(subscription_id=args.my_subscription_id,
                      location=args.location)

    logging.info("Beginning the deployment... \n\n")
    # Deploy the template; the DNS label must match ^[a-z][a-z0-9-]{1,61}[a-z0-9]$
    args.dns_label_prefix = args.vmName.lower()
    deploy.deploy(vars(args))

    logging.warning(
        "Done deploying!!\n\nYou can connect via: "
        "`ssh {}@{}.australiaeast.cloudapp.azure.com`".format(
            args.adminUserName, args.dns_label_prefix))
    logging.debug(str(deploy))
def run_example(config):
    deployer = None
    try:
        my_subscription_id = config['subscriptionId']  # your Azure Subscription Id
        my_resource_group = 'azure-python-deployment-sample'  # the resource group for deployment
        my_pub_ssh_key_path = os.path.expanduser('~/.ssh/id_rsa.pub')  # the path to your rsa public key file
        msg = "\nInitializing the Deployer class with subscription id: {}, resource group: {}" \
              "\nand public key located at: {}...\n\n"
        msg = msg.format(my_subscription_id, my_resource_group, my_pub_ssh_key_path)
        print(msg)
        # Initialize the deployer class
        deployer = Deployer(config, my_resource_group, my_pub_ssh_key_path)
        print("Beginning the deployment... \n\n")
        # Deploy the template
        my_deployment = deployer.deploy()
        print("Done deploying!!")
    finally:
        print("Cleaning up the deployment... \n\n")
        # Destroy the resource group which contains the deployment;
        # guard against a failed Deployer() construction
        if deployer is not None:
            deployer.destroy()
            print("Cleaned up the deployment successfully. \n")
def __init__(self, problem, sample_time=0.01, update_time=0.1):
    self.deployer = Deployer(problem, sample_time, update_time)
    self.update_time = update_time
    self.sample_time = sample_time
    self.problem = problem
    PlotLayer.simulator = self
    self.reset_timing()
def upload_theme(self, reason: str):
    d = Deployer(self.sub)
    with open(self.args.repo + "/dist/assets/css/screen.css") as f:
        stylesheet = f.read()
    images = []
    for suffix in ('png', 'PNG', 'jpg', 'JPG', 'jpeg', 'JPEG'):
        pattern = self.args.repo + "/dist/assets/*/*" + suffix
        images += [(Path(p).stem, p) for p in glob(pattern)]
    d.put(stylesheet, images, reason)
async def handle(request: Command):
    namespace = request.namespace
    command = request.command
    event = request.event
    print(f"Received namespace: {namespace}, command: {command}, event: {event}")

    if not command:
        return {
            "message": f":red_circle: No command provided. Try `.{namespace} help`"
        }

    cmd_parts = command.split()
    # Slack sends links surrounded by angle brackets (<, >) if it recognizes
    # a URL, so we need to extract the URL
    substring = SlackFormattedSubstring(cmd_parts[0])
    handler_url = (substring.get_content_or_none() if substring.is_url_link()
                   else substring.get_raw())

    if not validators.url(handler_url):
        return {
            "message": (f":red_circle: `{handler_url}` does not seem to be a valid URL; see: "
                        + "https://validators.readthedocs.io/en/latest/index.html#module-validators.url")
        }

    deployer = Deployer(handler_url)
    if len(cmd_parts) > 1:
        # only `<url> to <environment>` is accepted beyond the bare URL; any
        # other or longer form is rejected here, which also covers the
        # previously unreachable len(cmd_parts) > 3 branch
        if cmd_parts[1] == "to" and len(cmd_parts) == 3:
            deployer.set_environment(cmd_parts[2])
        else:
            return {
                "message": f":red_circle: I don't understand that command; try: `.{namespace} help`"
            }

    return {"message": deployer.deploy()}
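# A quick local exercise of handle() above. Assumption: Command is a simple
# request model exposing namespace/command/event attributes, so a stand-in
# namespace object is enough to drive the handler without a Slack round-trip.
import asyncio
from types import SimpleNamespace

async def _demo_handle():
    req = SimpleNamespace(namespace="deploy", command="help", event=None)
    # "help" fails URL validation, so handle() answers with the
    # ":red_circle: `help` does not seem to be a valid URL; ..." message
    print(await handle(req))

# asyncio.run(_demo_handle())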
def deploy(slug, testing_url, production_url, theme_url, production_server,
           production_dir):
    build_dir = os.path.join(SETTINGS.BUILD_DIR, slug)
    archive_dir = os.path.join(SETTINGS.ARCHIVE_DIR, slug)
    compiler = Compiler(build_dir, testing_url, production_url, theme_url)
    compiler.compile()
    archiver = Archiver(slug, build_dir, archive_dir)
    archive = archiver.archive()
    deployer = Deployer(production_server, SETTINGS.SSH_KEY, archive_dir,
                        production_dir)
    deployer.deploy(archive)
    return True
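# Hypothetical invocation of deploy() above; the slug, URLs, server and
# directory are illustrative only:
#
# deploy('my-site',
#        testing_url='https://test.example.org',
#        production_url='https://www.example.org',
#        theme_url='https://themes.example.org/base',
#        production_server='deploy@www.example.org',
#        production_dir='/var/www/my-site')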
def main():
    config = get_config()
    name = config.name
    img_name = "deploy_data/" + name + ".tif"
    data = skimage.io.imread(img_name)
    print(data.shape)
    data = np.expand_dims(data, axis=3)
    data = (data / 255.).astype(np.float32)
    print(data.shape)
    with tf.Session() as sess:
        deployer = Deployer(sess, data, config)
        deployer.deploy()
def deploy():
    deployer = Deployer()
    print("\nDeploying contract...")
    (contract, tx_hash) = deployer.deploy(
        compiled_path,
        contract_name,
        {
            "from": sender_account,
            "value": 0,
            "nonce": 1642,
            "gas": gas,
            "gasPrice": gas_price
        },
    )
    print("\nDeployment transaction hash: ", tx_hash.hex(),
          "\nCrowdsale address: ", contract.address)
    write_to_address_log(contract)
    return contract
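# write_to_address_log() is called above but not defined in this snippet.
# A minimal sketch of what such a helper might do -- the file name and line
# format here are assumptions, not the original implementation:
import time

def write_to_address_log(contract, log_path="address_log.txt"):
    # append the deployed contract address with a timestamp
    with open(log_path, "a") as f:
        f.write("{}\t{}\n".format(time.strftime("%Y-%m-%d %H:%M:%S"),
                                  contract.address))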
def main():
    parser = argparse.ArgumentParser(
        description='pyginate - static web site generator')
    parser.add_argument('action', help='Action to perform',
                        choices=['build', 'deploy', 'process'])
    parser.add_argument('-d', '--dir', help='Web site base directory',
                        default=os.getcwd())
    parser.add_argument('-c', '--config_file_name',
                        help='Configuration file name',
                        default='pyginator.json')
    parser.add_argument('-s', '--processing_script',
                        help='Processing script to apply', default='')
    parser.add_argument('-dr', '--dry_run', action='store_true',
                        help='Do not apply any changes, just report')
    args = parser.parse_args()

    conf_file = os.path.join(args.dir, args.config_file_name)
    with open(conf_file) as f:
        configuration = Configuration(args.dir, json.load(f))

    if args.action == 'build':
        builder = Builder(configuration)
        builder.build()
    elif args.action == 'deploy':
        deployer = Deployer(configuration)
        deployer.deploy()
    elif args.action == 'process':
        if args.dry_run:
            configuration.dry_run = True
        processor = Processor(configuration, args.processing_script)
        processor.process()
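# The layout of pyginator.json is not shown in this collection; Configuration()
# above only receives the parsed dict. A hypothetical minimal config, with all
# keys assumed for illustration, could be generated like this:
import json

example_conf = {
    "site_name": "My site",
    "source_dir": "src",
    "output_dir": "build",
    "deploy_target": "user@host:/var/www/site"
}
with open("pyginator.json", "w") as f:
    json.dump(example_conf, f, indent=2)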
import time, json, hashlib, binascii, os

from flask import Flask, Response, request
from flask_jwt import JWT, jwt_required, current_identity
from pymongo import MongoClient
from werkzeug.security import safe_str_cmp
from prometheus_client.exposition import CONTENT_TYPE_LATEST, generate_latest

from deployer import Deployer

MONGODB_HOSTNAME = os.environ.get("MONGODB_HOSTNAME", "localhost")
MONGODB_PORT = os.environ.get("MONGODB_PORT", "27017")

# log = logging.getLogger('console')

app = Flask(__name__)
deployer = Deployer()
deployer.start()

username_table = {}
userid_table = {}
client = None


class User(object):
    def __init__(self, id, username, password):
        self.id = id
        self.username = username
        self.password = password

    def __str__(self):
        return "User(id='%s')" % self.id
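# The username_table/userid_table above are empty at import time and are
# presumably filled from MongoDB elsewhere. The imports imply the canonical
# flask_jwt wiring; a sketch of the expected callbacks, not code from this
# service:
def authenticate(username, password):
    user = username_table.get(username, None)
    if user and safe_str_cmp(user.password.encode('utf-8'),
                             password.encode('utf-8')):
        return user

def identity(payload):
    user_id = payload['identity']
    return userid_table.get(user_id, None)

jwt = JWT(app, authenticate, identity)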
class Simulator:

    def __init__(self, problem, sample_time=0.01, update_time=0.1):
        self.deployer = Deployer(problem, sample_time, update_time)
        self.update_time = update_time
        self.sample_time = sample_time
        self.problem = problem
        PlotLayer.simulator = self
        self.reset_timing()

    def set_problem(self, problem):
        self.deployer.set_problem(problem)
        self.problem = problem

    def run(self):
        self.deployer.reset()
        stop = False
        while not stop:
            stop = self.update()
            ### adapted ###
            if (stop or self.update_time -
                    float(self.problem.vehicles[0].signals['time'][:, -1] -
                          self.current_time)) > self.sample_time:
                update_time = float(
                    self.problem.vehicles[0].signals['time'][:, -1] -
                    self.current_time)
                # correcting for first time
                # avoid negative update times
                self.update_timing(max(0, update_time - self.sample_time))
            else:
                self.update_timing()
        self.problem.final()
        # return trajectories and signals
        trajectories, signals = {}, {}
        if len(self.problem.vehicles) == 1:
            return (self.problem.vehicles[0].traj_storage,
                    self.problem.vehicles[0].signals)
        else:
            for vehicle in self.problem.vehicles:
                trajectories[str(vehicle)] = vehicle.traj_storage
                signals[str(vehicle)] = vehicle.signals
        return trajectories, signals

    def step(self, update_time=0.1):
        stop = self.update()
        if stop:
            update_time = float(self.problem.vehicles[0].signals['time'][:, -1] -
                                self.current_time)
            self.update_timing(update_time)
            self.problem.final()
        else:
            self.update_timing(update_time)
        # return trajectories and signals
        trajectories, signals, curr_state = {}, {}, {}
        # determine remaining motion time
        # depends on problem type, for fixedT motion_time is always = horizon_time
        # local import required to avoid circular dependency
        from ..problems.point2point import FixedTPoint2point, FreeTPoint2point
        if isinstance(self.problem, FixedTPoint2point):
            motion_time = self.problem.options['horizon_time']
        elif isinstance(self.problem, FreeTPoint2point):
            motion_time = self.problem.father.get_variables(self.problem, 'T')[0][0]
        if len(self.problem.vehicles) == 1:
            return (self.problem.vehicles[0].signals['state'][:, -1],
                    self.current_time, motion_time, stop,
                    self.problem.vehicles[0].traj_storage,
                    self.problem.vehicles[0].signals)
        else:
            for vehicle in self.problem.vehicles:
                trajectories[str(vehicle)] = vehicle.traj_storage
                signals[str(vehicle)] = vehicle.signals
                curr_state[str(vehicle)] = vehicle.signals['state'][:, -1]
        return curr_state, self.current_time, motion_time, stop, trajectories, signals

    def update(self):
        # update deployer
        self.deployer.update(self.current_time)
        # simulate problem
        self.problem.simulate(self.current_time, self.update_time, self.sample_time)
        # check stop condition
        stop = self.problem.stop_criterium(self.current_time, self.update_time)
        return stop

    def reset_timing(self):
        self.current_time = 0.
        self.time = np.r_[0.]
    def update_timing(self, update_time=None):
        update_time = self.update_time if not update_time else update_time
        self.current_time += update_time
        n_samp = int(np.round(update_time / self.sample_time, 6))
        # n_samp = max(0, int(np.round(update_time/self.sample_time, 6)))
        self.time = np.r_[
            self.time,
            np.linspace(self.time[-1] + self.sample_time,
                        self.time[-1] + n_samp * self.sample_time, n_samp)]

    def run_once(self, simulate=True, **kwargs):
        if 'hard_stop' in kwargs:
            hard_stop = kwargs['hard_stop']
        else:
            hard_stop = None
        self.deployer.reset()
        self.deployer.update(self.current_time, None, update_time=np.inf)
        if not simulate:
            return None
        if hard_stop:
            self.hard_stop(self.current_time, hard_stop['time'],
                           hard_stop['perturbation'])
        else:
            self.problem.simulate(self.current_time, np.inf, self.sample_time)
        self.problem.final()
        # determine timing
        update_time = self.problem.vehicles[0].signals['time'][:, -1] - self.current_time
        self.update_timing(update_time)
        # return trajectories
        trajectories = {}
        if len(self.problem.vehicles) == 1:
            return self.problem.vehicles[0].trajectories
        else:
            for vehicle in self.problem.vehicles:
                trajectories[str(vehicle)] = vehicle.trajectories
        return trajectories

    def hard_stop(self, current_time, stop_time, perturbation):
        self.problem.simulate(current_time, stop_time, self.sample_time)
        for k, vehicle in enumerate(self.problem.vehicles):
            vehicle.overrule_state(vehicle.signals['state'][:, -1] +
                                   np.array(perturbation[k]))
            vehicle.overrule_input(np.zeros(len(vehicle.prediction['input'])))

    def sleep(self, sleep_time):
        self.problem.sleep(self.current_time, sleep_time, self.sample_time)
        self.update_timing(sleep_time)

    def time2index(self, time):
        Ts = self.sample_time
        for k, t in enumerate(self.time):
            t = np.round(t, 6)
            if (t <= time) and (time < (t + Ts)) and ((time - t) <= (t + Ts - time)):
                return k
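# Standalone demonstration of the time2index() rounding rule above, using an
# explicit grid instead of a full Simulator (assumption: Ts = 0.01 s, the
# default sample_time):
import numpy as np

Ts = 0.01
time_grid = np.r_[0., np.linspace(Ts, 5 * Ts, 5)]  # [0.0, 0.01, ..., 0.05]
query = 0.024
for k, t in enumerate(time_grid):
    t = np.round(t, 6)
    if (t <= query) and (query < t + Ts) and ((query - t) <= (t + Ts - query)):
        print(k)  # -> 2: 0.024 s lies in [0.02, 0.03) and is nearest 0.02
        break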
    elif len(args_list) == 1:
        dp.test_server()
        dp.run_command(args_list[0])
    else:
        server_list = deal_select_server(args_list[1:])
        dp.run_command(args_list[0], server_list)


if __name__ == '__main__':
    if '-h' in sys.argv or '--help' in sys.argv:
        show_help()
        sys.exit(0)
    print(color_str(Color.YELLOW, "loading profile...\n"))
    conf_path = os.environ['HOME'] + "/.ssh/auto_password.conf"
    dp = Deployer(conf_path)
    print('read profile {}\n'.format(color_str(Color.GREEN, "success")))
    opts = sys.argv[1:]
    if not opts:
        loop_manage_server()
    elif opts[0] == 'push':
        push_file_controller(opts[1:])
    elif opts[0] == 'pull':
        pull_file_controller(opts[1:])
    elif opts[0] == 'run':
        run_command_controller(opts[1:])
    elif opts[0] == 'info':
        dp.test_server()
import json
from sys import argv, exit

import helm
from deployer import Deployer

if len(argv) != 3:
    print('USAGE: (install|delete) file.json')
    exit(1)

if argv[1] == "install":
    delete_mode = False
elif argv[1] == "delete":
    delete_mode = True
else:
    print('USAGE: (install|delete) file.json')
    exit(1)

with open(argv[2], 'r') as stream:
    doc = json.loads(stream.read())

if 'repositories' in doc:
    for name, url in doc['repositories'].items():
        helm.repo_add(name, url)
    helm.repo_update()

with Deployer() as deployer:
    if delete_mode:
        deployer.delete(doc['charts'], doc['env'])
    else:
        deployer.install(doc['charts'], doc['env'])
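# The shape of file.json is implied by the lookups above ('repositories',
# 'charts', 'env'). A hypothetical document, shown as the dict json.loads()
# would return -- the chart entry keys, names and URLs are illustrative only:
example_doc = {
    "repositories": {"stable": "https://charts.helm.sh/stable"},
    "env": "staging",
    "charts": [
        {"name": "redis", "chart": "stable/redis", "namespace": "cache"}
    ]
}
# with Deployer() as deployer:
#     deployer.install(example_doc['charts'], example_doc['env'])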
#!/usr/bin/env python3
import sys
sys.path.append("../deployment")
from deployer import Deployer
from web3_interface import Web3Interface
from tx_checker import fails, succeeds

web3 = Web3Interface().w3
web3.miner.start(1)
deployer = Deployer()
owner = web3.eth.accounts[0]
new_owner = web3.eth.accounts[1]
gas = 5000000
gas_price = 20000000000
tx = {"from": owner, "value": 0, "gas": gas, "gasPrice": gas_price}

(ownable_contract, tx_hash) = deployer.deploy("./build/", "OwnableMock", tx)
receipt = web3.eth.waitForTransactionReceipt(tx_hash)
assert receipt.status == 1
functions = ownable_contract.functions

def get_owner():
    return functions.owner().call()

assert get_owner() == owner
succeeds("Transfer ownership succeeds.",
         functions.transferOwnership(new_owner).transact(tx))
assert get_owner() == new_owner

web3.miner.stop()
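# fails()/succeeds() come from tx_checker, which is not included in this
# collection. A minimal sketch consistent with how they are called here,
# succeeds(message, tx_hash) / fails(message, tx_hash) -- an assumption,
# not the actual implementation:
def succeeds(message, tx_hash):
    # wait for the transaction to be mined and assert it did not revert
    receipt = web3.eth.waitForTransactionReceipt(tx_hash)
    assert receipt.status == 1, message
    print(message)

def fails(message, tx_hash):
    # assert the transaction reverted (status 0)
    receipt = web3.eth.waitForTransactionReceipt(tx_hash)
    assert receipt.status == 0, message
    print(message)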
#!/usr/bin/env python3
import sys
sys.path.append("../deployment")
from deployer import Deployer
from web3_interface import Web3Interface
from tx_checker import fails, succeeds
from test_config import config_f

web3 = Web3Interface().w3
web3.miner.start(1)
deployer = Deployer()
accounts = web3.eth.accounts
sender = accounts[0]
gas = 50000000
gas_price = 20000000000
tx = {"from": sender, "value": 0, "gas": gas, "gasPrice": gas_price}
config = config_f()
to_mint = 100000
address_zero = web3.toChecksumAddress("0x0000000000000000000000000000000000000000")

# both constructor calls below are expected to revert (receipt status 0)
(mintable_token_contract, tx_hash) = deployer.deploy(
    "./build/", "MintableTokenMock", tx,
    config['multisig_supply'], address_zero, True)
receipt = web3.eth.waitForTransactionReceipt(tx_hash)
assert receipt.status == 0

(mintable_token_contract, tx_hash) = deployer.deploy(
    "./build/", "MintableTokenMock", tx,
    0, config['MW_address'], False)
receipt = web3.eth.waitForTransactionReceipt(tx_hash)
assert receipt.status == 0
# Standard_DS3_v2_Promo,Standard_DS4_v2_Promo,Standard_DS5_v2_Promo,Standard_DS11_v2_Promo,Standard_DS12_v2_Promo,Standard_DS13_v2_Promo,Standard_DS14_v2_Promo,Standard_F1s,Standard_F2s
# Standard_F4s,Standard_F8s,Standard_F16s,Standard_D2_v3,Standard_D4_v3,Standard_D8_v3,Standard_D16_v3,Standard_D32_v3,Standard_D2s_v3,Standard_D4s_v3,Standard_D8s_v3,Standard_D16s_v3
# Standard_D32s_v3,Standard_NV6,Standard_NV12,Standard_NV24,Standard_D64_v3,Standard_D64s_v3,Standard_E2_v3,Standard_E4_v3,Standard_E8_v3,Standard_E16_v3,Standard_E32_v3,Standard_E64_v3
# Standard_E2s_v3,Standard_E4s_v3,Standard_E8s_v3,Standard_E16s_v3,Standard_E32s_v3,Standard_E64s_v3,Standard_F2s_v2,Standard_F4s_v2,Standard_F8s_v2,Standard_F16s_v2,Standard_F32s_v2
# Standard_F64s_v2,Standard_F72s_v2,Standard_NC6,Standard_NC12,Standard_NC24,Standard_NC24r,Standard_H8,Standard_H16,Standard_H8m,Standard_H16m,Standard_H16r,Standard_H16mr,Standard_G1
# Standard_G2,Standard_G3,Standard_G4,Standard_G5,Standard_GS1,Standard_GS2,Standard_GS3,Standard_GS4,Standard_GS4-4,Standard_GS4-8,Standard_GS5,Standard_GS5-8,Standard_GS5-16
# Standard_L4s,Standard_L8s,Standard_L16s,Standard_L32s,Standard_M64-16ms,Standard_M64-32ms,Standard_M64ms,Standard_M64s,Standard_M128-32ms,Standard_M128-64ms,Standard_M128ms
# Standard_M128s,Standard_ND6s,Standard_ND12s,Standard_ND24rs,Standard_ND24s,Standard_E32-8s_v3,Standard_E32-16s_v3,Standard_E64-16s_v3,Standard_E64-32s_v3,Standard_NC6s_v2
# Standard_NC12s_v2,Standard_NC24rs_v2,Standard_NC24s_v2,Standard_A8,Standard_A9,Standard_A10,Standard_A11

user = ""
passwd = ""
my_subscription_id = ""
my_resource_group = ''

# Initialize the deployer class
# deployer = Deployer(my_subscription_id, my_resource_group, my_pub_ssh_key_path)
print("Deployment set with " + my_subscription_id + " and " + my_resource_group)
deployer = Deployer(user, passwd, my_subscription_id, my_resource_group)

# Deploy the template
print("Beginning the deployment... \n\n")
my_deployment = deployer.deploy()
print("Done deploying!!\n\nYou can connect via: "
      "`ssh azureSample@{}.westeurope.cloudapp.azure.com`"
      .format(deployer.dns_label_prefix))

# Destroy the resource group which contains the deployment
# deployer.destroy()
#!/usr/bin/env python3
import sys
sys.path.append("../deployment")
from deployer import Deployer
from web3_interface import Web3Interface
from tx_checker import fails, succeeds
from test_config import config_f
import time

web3 = Web3Interface().w3
web3.miner.start(1)
deployer = Deployer()
owner = web3.eth.accounts[0]
gas = 50000000
gas_price = 20000000000
tx = {"from": owner, "value": 0, "gas": gas, "gasPrice": gas_price}
config = config_f()
tokens_sold_example = 34 * 10**23

(token_tranche_pricing_contract, tx_hash) = deployer.deploy(
    "./build/", "TokenTranchePricingMock", tx,
)
receipt = web3.eth.waitForTransactionReceipt(tx_hash)
assert receipt.status == 1
functions = token_tranche_pricing_contract.functions

def get_tranches_length():
# TODO: compare the wished-for deployment with the current deployment
# TODO: delete ones not in the desired state
# TODO: update ones that are in the current list
# TODO: create new ones if they don't exist

my_location = 'uksouth'
# the path to your rsa public key file
my_pub_ssh_key_path = os.path.expanduser('~/.ssh/id_rsa.pub')

msg = "\nInitializing the Deployer class with subscription id: {}, resource group: {}" \
      "\nand public key located at: {}...\n\n"
msg = msg.format(my_subscription_id, my_resource_group, my_pub_ssh_key_path)
print(msg)

# Initialize the deployer class
deployer = Deployer(my_subscription_id, my_resource_group, my_location,
                    my_pub_ssh_key_path)

print("Beginning the deployment... \n\n")
# Deploy the template
# deployer.deploy()
print("Done deploying!!\n\nYou can connect via: "
      "`ssh azureSample@{}.westus.cloudapp.azure.com`".format(deployer.dns_label_prefix))

# Destroy the resource group which contains the deployment
deployer.destroy()
#!/usr/bin/env python3
import sys
sys.path.append("../deployment")
from deployer import Deployer
from web3_interface import Web3Interface
from tx_checker import fails, succeeds
from test_config import config_f

web3 = Web3Interface().w3
config = config_f()
web3.miner.start(1)
deployer = Deployer()
accounts = web3.eth.accounts
sender = accounts[0]
gas = 50000000
gas_price = 20000000000
tx = {"from": sender, "value": 0, "gas": gas, "gasPrice": gas_price}

(crowdsale_token_contract, tx_hash) = deployer.deploy(
    "./build/", "CrowdsaleToken", tx,
    config["multisig_supply"], config["token_decimals"],
    config["MW_address"], config["token_retriever_account"])
receipt = web3.eth.waitForTransactionReceipt(tx_hash)
assert receipt.status == 1
functions = crowdsale_token_contract.functions

def can_upgrade():
    return functions.canUpgrade().call()
class Simulator:

    def __init__(self, problem, sample_time=0.01, update_time=0.1):
        self.deployer = Deployer(problem, sample_time, update_time)
        self.update_time = update_time
        self.sample_time = sample_time
        self.problem = problem
        PlotLayer.simulator = self
        self.reset_timing()

    def set_problem(self, problem):
        self.deployer.set_problem(problem)
        self.problem = problem

    def run(self):
        self.deployer.reset()
        stop = False
        while not stop:
            stop = self.update()
            if stop:
                update_time = float(self.problem.vehicles[0].signals['time'][:, -1] -
                                    self.current_time)
                self.update_timing(update_time - self.sample_time)  # correcting for first time
            else:
                self.update_timing()
        self.problem.final()
        # return trajectories and signals
        trajectories, signals = {}, {}
        if len(self.problem.vehicles) == 1:
            return (self.problem.vehicles[0].traj_storage,
                    self.problem.vehicles[0].signals)
        else:
            for vehicle in self.problem.vehicles:
                trajectories[str(vehicle)] = vehicle.traj_storage
                signals[str(vehicle)] = vehicle.signals
        return trajectories, signals

    def update(self):
        # update deployer
        self.deployer.update(self.current_time)
        # simulate problem
        self.problem.simulate(self.current_time, self.update_time, self.sample_time)
        # check stop condition
        stop = self.problem.stop_criterium(self.current_time, self.update_time)
        return stop

    def reset_timing(self):
        self.current_time = 0.
        self.time = np.r_[0.]

    def update_timing(self, update_time=None):
        update_time = self.update_time if not update_time else update_time
        self.current_time += update_time
        n_samp = int(np.round(update_time / self.sample_time, 6))
        self.time = np.r_[
            self.time,
            np.linspace(self.time[-1] + self.sample_time,
                        self.time[-1] + n_samp * self.sample_time, n_samp)]

    def run_once(self, simulate=True, **kwargs):
        if 'hard_stop' in kwargs:
            hard_stop = kwargs['hard_stop']
        else:
            hard_stop = None
        self.deployer.reset()
        self.deployer.update(self.current_time, None, np.inf)
        if not simulate:
            return None
        if hard_stop:
            self.hard_stop(self.current_time, hard_stop['time'],
                           hard_stop['perturbation'])
        else:
            self.problem.simulate(self.current_time, np.inf, self.sample_time)
        self.problem.final()
        # determine timing
        update_time = self.problem.vehicles[0].signals['time'][:, -1] - self.current_time
        self.update_timing(update_time)
        # return trajectories
        trajectories = {}
        if len(self.problem.vehicles) == 1:
            return self.problem.vehicles[0].trajectories
        else:
            for vehicle in self.problem.vehicles:
                trajectories[str(vehicle)] = vehicle.trajectories
        return trajectories

    def hard_stop(self, current_time, stop_time, perturbation):
        self.problem.simulate(current_time, stop_time, self.sample_time)
        for k, vehicle in enumerate(self.problem.vehicles):
            vehicle.overrule_state(vehicle.signals['state'][:, -1] +
                                   np.array(perturbation[k]))
            vehicle.overrule_input(np.zeros(len(vehicle.prediction['input'])))

    def sleep(self, sleep_time):
        self.problem.sleep(self.current_time, sleep_time, self.sample_time)
        self.update_timing(sleep_time)

    def time2index(self, time):
        Ts = self.sample_time
        for k, t in enumerate(self.time):
            t = np.round(t, 6)
            if (t <= time) and (time < (t + Ts)) and ((time - t) <= (t + Ts - time)):
                return k
#!/usr/bin/env python3
import sys
sys.path.append("../deployment")
from deployer import Deployer
from web3_interface import Web3Interface
from tx_checker import fails, succeeds
import time

web3 = Web3Interface().w3
web3.miner.start(1)
deployer = Deployer()
accounts = web3.eth.accounts
sender = accounts[0]
gas = 50000000
gas_price = 20000000000
tx = {"from": sender, "value": 0, "gas": gas, "gasPrice": gas_price}
agent_to_approve = accounts[2]
tokens_to_approve = 100000

(standard_token_mock_contract, tx_hash_standard_token) = deployer.deploy(
    "./build/", "StandardTokenMock", tx,
)
time.sleep(1.4)
(lost_and_found_token_contract, tx_hash) = deployer.deploy(
    "./build/", "LostAndFoundTokenMock",
#!/usr/bin/env python3
import sys
sys.path.append("../deployment")
from deployer import Deployer
from web3_interface import Web3Interface
from tx_checker import fails, succeeds

web3 = Web3Interface().w3
web3.miner.start(1)
deployer = Deployer()
accounts = web3.eth.accounts
sender = accounts[0]
gas = 50000000
gas_price = 20000000000
tx = {"from": sender, "value": 0, "gas": gas, "gasPrice": gas_price}

(standard_token_contract, tx_hash) = deployer.deploy(
    "./build/", "StandardTokenMock", tx,
)
receipt = web3.eth.waitForTransactionReceipt(tx_hash)
assert receipt.status == 1
functions = standard_token_contract.functions

token_balances = {x: 0 for x in accounts}
allowed = {i: {x: 0 for x in accounts} for i in accounts}
total_supply = 0
value_transfer = 10000
value_mint = 100000

def get_total_supply():
    return functions.totalSupply().call()

def balance_of(address):
    return functions.balanceOf(address).call()
#!/usr/bin/env python3
import sys
sys.path.append("../deployment")
from deployer import Deployer
from web3_interface import Web3Interface
from tx_checker import fails, succeeds
from test_config import config_f
from generic_crowdsale_mock_checker import GenericCrowdsaleChecker

web3 = Web3Interface().w3
config = config_f()
web3.miner.start(1)
deployer = Deployer()
accounts = web3.eth.accounts
sender = accounts[0]
gas = 50000000
gas_price = 20000000000
tx = {"from": sender, "value": 0, "gas": gas, "gasPrice": gas_price}

(generic_crowdsale_mock_contract, tx_hash) = deployer.deploy(
    "./build/", "GenericCrowdsaleMock", tx,
)
receipt = web3.eth.waitForTransactionReceipt(tx_hash)
assert receipt.status == 1
functions = generic_crowdsale_mock_contract.functions

generic_crowdsale_mock_interface = GenericCrowdsaleChecker(
    config,
# Create a random password with Haikunator
passwd = funny_generator.haikunate()
# Adjust the haikunated password to be Azure-friendly
passwd_list = list(passwd)
my_user_password = ''.join([passwd_list[i].upper() if i == 0 else
                            passwd_list[i]
                            for i in range(len(passwd_list))]).replace('-', '_')
print('Take note of the admin password: '******'/home/usersinfo.csv', download_path='.')
# TODO: create a database of users and passwords
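# The first-letter-uppercase / hyphen-to-underscore transformation above can
# be wrapped in a small helper; a sketch, not part of the original snippet:
def azure_friendly(password: str) -> str:
    # capitalize the first character and strip hyphens, which this sample
    # treats as the requirements for an Azure admin password
    return (password[:1].upper() + password[1:]).replace('-', '_')

# e.g. azure_friendly('icy-haze-2214') -> 'Icy_haze_2214'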
def restore(slug, archive, production_server, production_dir):
    archive_dir = os.path.join(SETTINGS.ARCHIVE_DIR, slug)
    deployer = Deployer(production_server, SETTINGS.SSH_KEY, archive_dir,
                        production_dir)
    deployer.deploy(archive)
def start_experiment(self, results_dir, update_and_install=False,
                     experiment_runtime=5,   # minutes
                     runtime_buffer=1,       # minutes
                     stats_frequency="30s",
                     num_threads_in_pool=5,
                     replication_factor=1,
                     memaslap_workload="smallvalue.cfg",
                     memaslap_window_size="10k",
                     hibernate_at_end=True,
                     ssh_username="******",
                     num_memaslaps=1,
                     num_memcacheds=1,
                     concurrency=64,
                     is_first_run=True):
    experiment_runtime_string = "{}m".format(experiment_runtime)

    with fabric.api.settings(warn_only=True):
        fabric.api.local("rm -r {}/*".format(results_dir))
        fabric.api.local("mkdir -p {}".format(results_dir))
        fabric.api.local("mkdir {}/graphs".format(results_dir))

    # region ---- Parameters ----
    TOTAL_MACHINE_COUNT = 11  # this is fixed by the template
    resource_group_name = 'template11vms'
    my_pub_ssh_key_path = '~/.ssh/id_rsa_asl.pub'
    template_path = "azure-templates/template11vms.json"

    pub_ssh_key_path = os.path.expanduser(my_pub_ssh_key_path)
    with open(pub_ssh_key_path, 'r') as pub_ssh_file_fd:
        pub_ssh_key = pub_ssh_file_fd.read().strip()

    parameters = {
        "virtualMachines_name": "foraslvms",
        "virtualMachines_adminPassword": "******",
        "networkInterfaces_name": "MyNetworkInterface",
        "virtualNetworks_testeth_vnet_name": "MyVNet",
        "key": pub_ssh_key,
        "uniquedns": "pungast"
    }
    # endregion

    # Initialize the deployer class
    if is_first_run:
        self.deployer = Deployer(resource_group_name, template_path, parameters)
        self.deployer.deploy_wait()

    # region ---- Extract VMs' IPs and other information ----
    vms = self.deployer.compute_client.virtual_machines.list(resource_group_name)
    vm_names = []
    vm_types = []
    public_hostnames = []
    private_hostnames = []
    for vm in vms:
        vm_type = vm.hardware_profile.vm_size
        vm_types.append(vm_type)
        vm_names.append(vm.name)
        self.log.info("VM {} [{}]".format(Colors.ok_blue(vm.name), vm_type))

        # Get machine's public address that we can use for SSH-ing
        public_ip = self.deployer.network_client.public_ip_addresses.get(
            resource_group_name, vm.name)
        public_host_address = public_ip.dns_settings.fqdn
        public_hostnames.append(public_host_address)
        # self.log.info("Public host name: {}".format(Colors.ok_green(public_host_address)))

        # Get machine's private IP address
        network_interface_id = vm.network_profile.network_interfaces[0].id
        network_interface_name = network_interface_id.split("/")[-1]
        network_interface = self.deployer.network_client.network_interfaces.get(
            resource_group_name, network_interface_name)
        private_host_address = network_interface.ip_configurations[0].private_ip_address
        private_hostnames.append(private_host_address)
        # self.log.info("Private host name: {}".format(Colors.ok_green(private_host_address)))
    # endregion

    # region ---- Set up all machines ----
    index_a4 = vm_types.index("Basic_A4")
    indices_smallmachines = list(range(TOTAL_MACHINE_COUNT))
    indices_smallmachines.remove(index_a4)
    memcached_machines = [vm_names.index("foraslvms" + str(x))
                          for x in Experiment.default_memcached_machines()]
    memcached_machines = memcached_machines[0:num_memcacheds]
    memaslap_machines = [vm_names.index("foraslvms" + str(x))
                         for x in Experiment.default_memaslap_machines()]
    memaslap_machines = memaslap_machines[0:num_memaslaps]
    self.log.info("A4 machine: " + str(index_a4))
    self.log.info("A2 machines: " + str(indices_smallmachines))
    self.log.info("Memcached machines: " + str(memcached_machines))
    self.log.info("Memaslap machines: " + str(memaslap_machines))

    # Wait for all servers to be responsive
    if is_first_run:
        aslutil.wait_for_servers(ssh_username, public_hostnames,
                                 "~/.ssh/id_rsa_asl", self.log,
                                 check_every_n_sec=10)

    # Set up memcached servers
    memcached_port = 11211
    mc_servers = []
    mc_server_string_list = []
    for i in memcached_machines:
        self.log.info("Setting up memcached on machine {} ({}).".format(i, vm_names[i]))
        mc_server = Memcached(memcached_port, public_hostnames[i],
                              ssh_username=ssh_username,
                              id_number=int(aslutil.server_name_to_number(vm_names[i])))
        mc_servers.append(mc_server)
        mc_server_string_list.append("{}:{}".format(private_hostnames[i], memcached_port))
        if update_and_install:
            mc_server.update_and_install()
    for s in mc_servers:
        s.start()

    sleep_for = 15
    self.log.info("Sleeping for {} seconds so memcached servers can start...".format(sleep_for))
    time.sleep(sleep_for)

    # Set up middleware server
    middleware_port = 11212
    self.log.info("Setting up middleware on machine {} ({}).".format(index_a4, vm_names[index_a4]))
    mw_server = Middleware(public_hostnames[index_a4], private_hostnames[index_a4],
                           middleware_port, num_threads_in_pool, replication_factor,
                           mc_server_string_list, ssh_username=ssh_username)
    if update_and_install:
        mw_server.update_and_install()
    if is_first_run:
        mw_server.upload_jar()
    mw_server.clear_logs()
    mw_server.start()

    # Sleep a bit so middleware has time to start
    while not mw_server.is_running():
        sleep_for = 5
        self.log.info("Sleeping for {} seconds so middleware can start...".format(sleep_for))
        time.sleep(sleep_for)
    time.sleep(10)

    # Set up memaslap servers
    ms_servers = []
    first_memaslap = True
    for i in memaslap_machines:
        self.log.info("Setting up memaslap on machine {} ({}).".format(i, vm_names[i]))
        ms_server = Memaslap(public_hostnames[i], private_hostnames[index_a4],
                             middleware_port, ssh_username=ssh_username,
                             id_number=int(aslutil.server_name_to_number(vm_names[i])))  # i is zero-indexed
        ms_servers.append(ms_server)
        if is_first_run:
            ms_server.upload_resources()
        if update_and_install:
            if not first_memaslap:
                ms_server.upload_built_files()
            ms_server.update_and_install()
            if first_memaslap:
                ms_server.download_built_files()
        first_memaslap = False
    for s in ms_servers:
        s.clear_logs()
        s.start(runtime=experiment_runtime_string,
                log_filename="memaslap{}.out".format(s.id_number),
                stats_freq=stats_frequency,
                workload_filename=memaslap_workload,
                concurrency=concurrency,
                window_size=memaslap_window_size)
    # endregion

    sleep_time = experiment_runtime + runtime_buffer
    self.log.info("Waiting for the experiment to finish, sleeping for up to {} minutes.".format(sleep_time))
    already_slept = 0
    while True:
        sleep_interval = 30
        time.sleep(sleep_interval)
        already_slept += sleep_interval
        num_running_memaslaps = sum([s.is_running() for s in ms_servers])
        self.log.info("Waiting for the experiment to finish (total {} minutes), "
                      "{:.0f}/{} minutes elapsed ({:.0f}%), {} memaslaps running."
                      .format(sleep_time, already_slept / 60, experiment_runtime,
                              100 * already_slept / 60.0 / experiment_runtime,
                              num_running_memaslaps))
        if already_slept >= sleep_time * 60:
            self.log.info("Stopping because of time limit.")
            break
        if num_running_memaslaps == 0:
            self.log.info("Stopping because no memaslaps are left.")
            break

    # region ---- Kill everyone ----
    # Memaslap
    for ms_server in ms_servers:
        ms_server.stop()
    # Middleware
    mw_server.stop()
    # Memcached
    for mc_server in mc_servers:
        mc_server.stop()
    # endregion

    # region ---- Download logs, extract data, plot ----
    mw_server.download_logs(local_path=results_dir)
    for ms_server in ms_servers:
        ms_server.download_logs(local_path=results_dir)
    # endregion

    if hibernate_at_end:
        self.deployer.hibernate_wait()

    self.log.info("Done.")
#!/usr/bin/env python3
import sys
sys.path.append("../deployment")
from deployer import Deployer
from web3_interface import Web3Interface
from tx_checker import fails, succeeds
from test_config import config_f
import random

web3 = Web3Interface().w3
eth = web3.eth
web3.miner.start(1)
deployer = Deployer()
gas = 5000000
gas_price = 20000000000
owner = eth.accounts[0]
release_agent = eth.accounts[1]
non_owner = eth.accounts[2]
transfer_agent = eth.accounts[3]
non_transfer_agent = eth.accounts[4]
non_release_agent = eth.accounts[5]
contribution_range = (1, 2**256)

def random_contribution():
    return 0
#!/usr/bin/env python3
import sys
sys.path.append("../deployment")
from deployer import Deployer
from web3_interface import Web3Interface
from tx_checker import fails, succeeds

web3 = Web3Interface().w3
web3.miner.start(1)
owner = web3.eth.accounts[0]
new_owner = web3.eth.accounts[1]
tx = {'from': owner, 'gas': 100000000, 'gasPrice': 20000000000}
deployer = Deployer()

(haltable_contract, tx_hash) = deployer.deploy(
    "./build/", "Haltable", tx,
)
receipt = web3.eth.waitForTransactionReceipt(tx_hash)
assert receipt.status == 1
functions = haltable_contract.functions

def halted():
    return functions.halted().call()

def get_owner():
    return functions.owner().call()
pub_ssh_key_path = os.path.expanduser(my_pub_ssh_key_path)
with open(pub_ssh_key_path, 'r') as pub_ssh_file_fd:
    pub_ssh_key = pub_ssh_file_fd.read().strip()

parameters = {
    "virtualMachines_name": "foraslvms",
    "virtualMachines_adminPassword": "******",
    "networkInterfaces_name": "MyNetworkInterface",
    "virtualNetworks_testeth_vnet_name": virtual_network_name,
    "key": pub_ssh_key,
    "uniquedns": dns_label_prefix
}
# endregion

# Initialize the deployer class
deployer = Deployer(resource_group_name, template_path, parameters)
deployer.deploy_wait()

# region ---- Extract VMs' IPs ----
vms = deployer.compute_client.virtual_machines.list(resource_group_name)
vm_names = []
public_hostnames = []
private_hostnames = []
for vm in vms:
    log.info("VM {}".format(Colors.ok_blue(vm.name)))
    # Get machine's public address that we can use for SSH-ing
    public_ip = deployer.network_client.public_ip_addresses.get(resource_group_name, vm.name)
    public_host_address = public_ip.dns_settings.fqdn
    public_hostnames.append(public_host_address)
    log.info("Public host name: {}".format(Colors.ok_green(public_host_address)))
my_subscription_id = os.environ.get(
    'AZURE_SUBSCRIPTION_ID',
    '11111111-1111-1111-1111-111111111111')  # your Azure Subscription Id
my_resource_group = "rg-scale-{}".format(scaleNumber)  # the resource group for deployment
my_pub_ssh_key_path = os.path.expanduser('~/.ssh/id_rsa.pub')  # the path to your rsa public key file

msg = "\nInitializing the Deployer class with subscription id: {}, resource group: {}" \
      "\nand public key located at: {}...\n\n"
msg = msg.format(my_subscription_id, my_resource_group, my_pub_ssh_key_path)
print(msg)

# Initialize the deployer class
deployer = Deployer(my_subscription_id, my_resource_group,
                    my_pub_ssh_key_path, scaleNumber)

print("Beginning the deployment... \n\n")
# Deploy the template
my_deployment = deployer.deploy()
print("Done deploying!!\n\nYou can connect via: `ssh {}@{}.westus2.cloudapp.azure.com`"
      .format(deployer.adminUsername, deployer.dns_label_prefix))

# Stop machines
# deployer.stopMachines()

# Resume machines
# deployer.resumeMachines()
                num_memcacheds=S,
                memaslap_workload=workload_filename,
                memaslap_window_size=memaslap_window_size,
                hibernate_at_end=hibernate_at_end,
                concurrency=concurrency,
                is_first_run=is_first)

            # Extract logs
            extractor.summarise_trace_logs(
                logs_pattern="{}/memaslap*.out".format(results_dir),
                csv_path="{}/{}".format(results_dir, memaslap_summary_filename))

            # Plot graphs
            with fabric.api.settings(warn_only=True):
                fabric.api.local("Rscript scripts/r/trace.r {}".format(results_dir))

            is_first = False
            # Deployer.hibernate_wait_static("template11vms")
        except msrestazure.azure_exceptions.CloudError as e:
            print("DEPLOYMENT EXCEPTION " + e.__class__.__name__ + ": " + str(e))
            if e.message.find("Unable to edit or replace deployment") == -1:
                Deployer.hibernate_wait_static("template11vms")
        except Exception as e:
            print("UNKNOWN DEPLOYMENT EXCEPTION " + e.__class__.__name__ + ": " + str(e))
            Deployer.hibernate_wait_static("template11vms")
def deploy(is_test):
    deployer = Deployer(is_test)
    deployer.deploy()
# AZURE_SUBSCRIPTION_ID: your subscription id
# AZURE_RESOURCE_LOCATION: with your azure stack resource location
# this example assumes your ssh public key is present here: ~/id_rsa.pub
my_subscription_id = os.environ.get('AZURE_SUBSCRIPTION_ID')  # your Azure Subscription Id
my_resource_group = 'azure-python-deployment-sample'  # the resource group for deployment
my_pub_ssh_key_path = os.path.expanduser('~/id_rsa.pub')  # the path to your rsa public key file

# Set Azure stack supported API profile as the default profile
KnownProfiles.default.use(KnownProfiles.v2018_03_01_hybrid)

msg = "\nInitializing the Deployer class with subscription id: {}, resource group: {}" \
      "\nand public key located at: {}...\n\n"
msg = msg.format(my_subscription_id, my_resource_group, my_pub_ssh_key_path)
print(msg)

# Initialize the deployer class
deployer = Deployer(my_subscription_id, my_resource_group, my_pub_ssh_key_path)

print("Beginning the deployment... \n\n")
# Deploy the template
my_deployment = deployer.deploy()
print("Done deploying!!\n\nYou can connect via: "
      "`ssh azureSample@{}.local.cloudapp.azurestack.external`"
      .format(deployer.dns_label_prefix))

# Destroy the resource group which contains the deployment
# deployer.destroy()
import os.path

from deployer import Deployer

# This script expects that the following environment vars are set:
#
# AZURE_TENANT_ID: with your Azure Active Directory tenant id or domain
# AZURE_CLIENT_ID: with your Azure Active Directory Application Client ID
# AZURE_CLIENT_SECRET: with your Azure Active Directory Application Secret

my_subscription_id = os.environ.get(
    'AZURE_SUBSCRIPTION_ID',
    '11111111-1111-1111-1111-111111111111')  # your Azure Subscription Id
my_resource_group = 'azure-python-deployment-sample'  # the resource group for deployment
my_pub_ssh_key_path = os.path.expanduser('~/.ssh/id_rsa.pub')  # the path to your rsa public key file

msg = "\nInitializing the Deployer class with subscription id: {}, resource group: {}" \
      "\nand public key located at: {}...\n\n"
msg = msg.format(my_subscription_id, my_resource_group, my_pub_ssh_key_path)
print(msg)

# Initialize the deployer class
deployer = Deployer(my_subscription_id, my_resource_group, my_pub_ssh_key_path)

print("Beginning the deployment... \n\n")
# Deploy the template
my_deployment = deployer.deploy()
print("Done deploying!!\n\nYou can connect via: "
      "`ssh azureSample@{}.westus.cloudapp.azure.com`".format(deployer.dns_label_prefix))

# Destroy the resource group which contains the deployment
# deployer.destroy()
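# None of the Azure samples above ship the Deployer class itself. A minimal
# sketch of the shape they assume, following the azure-mgmt-resource template
# deployment pattern with service-principal credentials from the environment
# variables listed above; the template path, deployment name, location and
# parameter names are assumptions (the real sample also exposes attributes
# such as dns_label_prefix that are omitted here):
import json
import os

from azure.common.credentials import ServicePrincipalCredentials
from azure.mgmt.resource import ResourceManagementClient
from azure.mgmt.resource.resources.models import DeploymentMode


class Deployer(object):
    def __init__(self, subscription_id, resource_group, pub_ssh_key_path):
        self.resource_group = resource_group
        with open(os.path.expanduser(pub_ssh_key_path)) as f:
            self.pub_ssh_key = f.read().strip()
        credentials = ServicePrincipalCredentials(
            client_id=os.environ['AZURE_CLIENT_ID'],
            secret=os.environ['AZURE_CLIENT_SECRET'],
            tenant=os.environ['AZURE_TENANT_ID'])
        self.client = ResourceManagementClient(credentials, subscription_id)

    def deploy(self):
        # create (or update) the resource group, then run the ARM template
        self.client.resource_groups.create_or_update(
            self.resource_group, {'location': 'westus'})
        with open('template.json') as template_fd:
            template = json.load(template_fd)
        properties = {
            'mode': DeploymentMode.incremental,
            'template': template,
            'parameters': {'sshKeyData': {'value': self.pub_ssh_key}},
        }
        operation = self.client.deployments.create_or_update(
            self.resource_group, 'azure-sample', properties)
        return operation.result()  # block until the deployment finishes

    def destroy(self):
        self.client.resource_groups.delete(self.resource_group).wait()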
def build(config: dict, args: argparse.Namespace) -> None:
    model = args.model
    group = args.group
    pipeline = args.pipeline

    if group:
        models = config['model_groups'].get(group)
        if not models:
            print(f'Group {group} does not exist or is empty')
            return
    elif model:
        models = [model]
    else:
        print('Please specify a group or a model full name')
        return

    absent_models = set(models) - set(config['models'].keys())
    if len(absent_models) > 0:
        absent_models = ', '.join(absent_models)
        print(f'Unknown model full names: {absent_models}')
        return

    if pipeline and pipeline not in preset_pipelines.keys():
        print(f'Unknown pipeline name: {pipeline}')
        return
    elif pipeline:
        for model in models:
            config['models'][model]['pipeline'] = pipeline
    else:
        absent_pipeline_models = []
        for model in models:
            if config['models'][model].get('pipeline') not in preset_pipelines.keys():
                absent_pipeline_models.append(model)
        if absent_pipeline_models:
            absent_pipeline_models = ', '.join(absent_pipeline_models)
            print(f'Incorrect or absent pipeline names for: {absent_pipeline_models}')
            return

    # Test Docker Hub authentication
    dockerhub_password = args.dockerhub_pass
    if not dockerhub_password:
        prompt_text = ('Docker Hub password was not entered, would you like '
                       'to proceed without Docker Hub login?')
        if not prompt_confirmation(prompt_text):
            return
    else:
        try:
            client: DockerClient = DockerClient(base_url=config['docker_base_url'])
            client.login(config['dockerhub_registry'], dockerhub_password)
        except APIError as e:
            print(e)
            prompt_text = ('Docker Hub login error occurred, would you like '
                           'to proceed without Docker Hub login?')
            if not prompt_confirmation(prompt_text):
                return
    config['dockerhub_password'] = dockerhub_password

    deployer = Deployer(config)
    deployer.deploy(models)
def deploy(self):
    deployer = Deployer(self.project)
    deployer.deploy()
    return self.project